2026-03-09T19:46:41.763 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T19:46:41.770 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T19:46:41.793 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628
branch: squid
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic}
email: null
first_in_suite: false
flavor: default
job_id: '628'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: false
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    - CEPHADM_AGENT_DOWN
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAapH/iivjdTU1f48YIdWjcXz4Pbcfip8GO7Pc8zxH78e5U7FOTb6VAXC/P02xGknGUj6iSNJgwE3WACmriKUgU=
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCFcRxyNVVw0MuDk0RuNxnzWbAhsrYyLJQxqZA2JNdrWlfNRwhEgT93pwIKVLvu3I64ufiVmQzb1B6YaNlSVlgg=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
- cephadm.shell:
    mon.a:
    - ceph config set mgr mgr/cephadm/use_repo_digest false --force
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - radosgw-admin realm create --rgw-realm=r --default
    - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    - radosgw-admin period update --rgw-realm=r --commit
    - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
    - ceph osd pool create foo
    - rbd pool init foo
    - ceph orch apply iscsi foo u p
    - sleep 180
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
    - ceph config set global log_to_journald false --force
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph orch ls
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 2'
    - ceph mgr fail
    - sleep 180
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph health detail
    - ceph -s
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph orch ls
    - ceph versions
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph versions | jq -e '.mgr | keys' | grep $sha1
    - ceph versions | jq -e '.overall | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 1'
    - ceph versions | jq -e '.mon | keys' | grep $sha1
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 1'
    - ceph versions | jq -e '.osd | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.rgw | length == 1'
    - ceph versions | jq -e '.rgw | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T19:46:41.793 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T19:46:41.793 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T19:46:41.793 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T19:46:41.794 INFO:teuthology.task.internal:Checking packages...
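Editor's note: every phase of the staggered-upgrade task in the config above is driven by the same shell polling idiom: start an upgrade for a subset of daemons, then loop on `ceph orch upgrade status` until `in_progress` goes false or an error message appears. A minimal standalone sketch of that wait loop (image name and 30-second interval taken from the config; the `grep -q` quieting is a cosmetic change from the original):

    #!/usr/bin/env bash
    # Sketch: wait for a cephadm staggered-upgrade phase to finish.
    # Mirrors the loop in the task YAML above; assumes jq is installed and
    # `ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 ...`
    # has already been issued.
    while ceph orch upgrade status | jq '.in_progress' | grep -q true &&
          ! ceph orch upgrade status | jq '.message' | grep -q Error; do
        ceph orch ps              # daemon placement and per-daemon versions
        ceph versions             # version counts per daemon type
        ceph orch upgrade status
        sleep 30                  # poll interval used by the test
    done
    # After the loop, the test asserts the expected version mix, e.g.:
    #   ceph versions | jq -e '.mgr | length == 1'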
2026-03-09T19:46:41.794 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T19:46:41.794 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T19:46:41.794 INFO:teuthology.packaging:ref: None
2026-03-09T19:46:41.794 INFO:teuthology.packaging:tag: None
2026-03-09T19:46:41.794 INFO:teuthology.packaging:branch: squid
2026-03-09T19:46:41.794 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T19:46:41.794 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-09T19:46:42.567 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-09T19:46:42.568 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T19:46:42.569 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T19:46:42.569 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T19:46:42.569 INFO:teuthology.task.internal:Saving configuration
2026-03-09T19:46:42.578 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T19:46:42.579 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-09T19:46:42.587 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 19:45:27.920115', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAapH/iivjdTU1f48YIdWjcXz4Pbcfip8GO7Pc8zxH78e5U7FOTb6VAXC/P02xGknGUj6iSNJgwE3WACmriKUgU='}
2026-03-09T19:46:42.593 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 19:45:27.919498', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCFcRxyNVVw0MuDk0RuNxnzWbAhsrYyLJQxqZA2JNdrWlfNRwhEgT93pwIKVLvu3I64ufiVmQzb1B6YaNlSVlgg='}
2026-03-09T19:46:42.593 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-09T19:46:42.594 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
2026-03-09T19:46:42.594 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
2026-03-09T19:46:42.594 INFO:teuthology.run_tasks:Running task console_log...
2026-03-09T19:46:42.600 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding
2026-03-09T19:46:42.606 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-09T19:46:42.606 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f94f967e170>, signals=[15])
2026-03-09T19:46:42.606 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-09T19:46:42.607 INFO:teuthology.task.internal:Opening connections...
2026-03-09T19:46:42.607 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local
2026-03-09T19:46:42.607 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T19:46:42.671 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-09T19:46:42.672 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T19:46:42.732 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-09T19:46:42.733 DEBUG:teuthology.orchestra.run.vm01:> uname -m
2026-03-09T19:46:42.793 INFO:teuthology.orchestra.run.vm01.stdout:x86_64
2026-03-09T19:46:42.794 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:NAME="CentOS Stream"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:ID="centos"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel fedora"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;31"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://centos.org/"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T19:46:42.848 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T19:46:42.848 INFO:teuthology.lock.ops:Updating vm01.local on lock server
2026-03-09T19:46:42.853 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-09T19:46:42.868 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-09T19:46:42.868 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-09T19:46:42.925 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream"
2026-03-09T19:46:42.925 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9"
2026-03-09T19:46:42.925 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos"
2026-03-09T19:46:42.925 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T19:46:42.926 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T19:46:42.926 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-09T19:46:42.930 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T19:46:42.932 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T19:46:42.933 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-09T19:46:42.933 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest
2026-03-09T19:46:42.935 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-09T19:46:42.979 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-09T19:46:42.980 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-09T19:46:42.980 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph)
2026-03-09T19:46:42.992 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-09T19:46:43.008 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T19:46:43.035 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T19:46:43.035 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-09T19:46:43.043 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready
2026-03-09T19:46:43.066 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T19:46:43.276 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-09T19:46:43.293 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T19:46:43.477 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-09T19:46:43.479 INFO:teuthology.task.internal:Creating test directory...
2026-03-09T19:46:43.479 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T19:46:43.481 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T19:46:43.495 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-09T19:46:43.496 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-09T19:46:43.497 INFO:teuthology.task.internal:Creating archive directory...
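Editor's note: the internal.check_ceph_data step above asserts /var/lib/ceph is empty with `test -z $(ls -A /var/lib/ceph)`, which also passes (with the harmless "No such file or directory" stderr seen above) when the directory does not exist, because the unquoted substitution expands to nothing. A quieter, more explicit variant (a hypothetical sketch, not what teuthology runs):

    # Sketch: treat "missing" and "empty" as distinct, explicit cases.
    dir=/var/lib/ceph
    if [ ! -e "$dir" ] || [ -z "$(ls -A "$dir" 2>/dev/null)" ]; then
        echo "$dir is absent or empty; safe to proceed"
    else
        echo "$dir is non-empty; refusing to reuse this host" >&2
        exit 1
    fi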
2026-03-09T19:46:43.497 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T19:46:43.537 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T19:46:43.553 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-09T19:46:43.555 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-09T19:46:43.555 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T19:46:43.605 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T19:46:43.605 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T19:46:43.618 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T19:46:43.618 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T19:46:43.647 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T19:46:43.669 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T19:46:43.677 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T19:46:43.682 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T19:46:43.690 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T19:46:43.692 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T19:46:43.693 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T19:46:43.693 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T19:46:43.720 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T19:46:43.757 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-09T19:46:43.759 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-09T19:46:43.759 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T19:46:43.785 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T19:46:43.812 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T19:46:43.867 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T19:46:43.925 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-09T19:46:43.925 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T19:46:43.989 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T19:46:44.018 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T19:46:44.078 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T19:46:44.078 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T19:46:44.144 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart
2026-03-09T19:46:44.146 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-09T19:46:44.172 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T19:46:44.216 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T19:46:44.618 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T19:46:44.619 INFO:teuthology.task.internal:Starting timer...
2026-03-09T19:46:44.619 INFO:teuthology.run_tasks:Running task pcp...
2026-03-09T19:46:44.622 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T19:46:44.624 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported
2026-03-09T19:46:44.624 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-09T19:46:44.624 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T19:46:44.624 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T19:46:44.624 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-09T19:46:44.624 INFO:teuthology.run_tasks:Running task ansible.cephlab...
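Editor's note: the internal.coredump step above redirects kernel core dumps into the per-run archive by rewriting kernel.core_pattern. A minimal sketch of the same setup, with paths copied from the commands in the log (run as root):

    # Sketch: persist core dumps under the test archive, as internal.coredump does.
    mkdir -p -m0755 /home/ubuntu/cephtest/archive/coredump
    # %t = dump time (seconds since epoch), %p = PID of the crashing process
    sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
    # make the setting survive a reboot
    echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core >> /etc/sysctl.conf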
2026-03-09T19:46:44.626 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-09T19:46:44.627 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-09T19:46:44.628 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-09T19:46:45.284 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-09T19:46:45.290 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-09T19:46:45.290 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventorykd1ytrma --limit vm01.local,vm06.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-09T19:49:14.229 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm01.local'), Remote(name='ubuntu@vm06.local')]
2026-03-09T19:49:14.229 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm01.local'
2026-03-09T19:49:14.230 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T19:49:14.300 DEBUG:teuthology.orchestra.run.vm01:> true
2026-03-09T19:49:14.378 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm01.local'
2026-03-09T19:49:14.378 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-09T19:49:14.378 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T19:49:14.440 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-09T19:49:14.523 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-09T19:49:14.523 INFO:teuthology.run_tasks:Running task clock...
2026-03-09T19:49:14.526 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-09T19:49:14.526 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T19:49:14.526 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T19:49:14.528 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T19:49:14.528 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T19:49:14.569 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-09T19:49:14.592 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-09T19:49:14.598 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-09T19:49:14.613 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-09T19:49:14.627 INFO:teuthology.orchestra.run.vm01.stderr:sudo: ntpd: command not found
2026-03-09T19:49:14.640 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found
2026-03-09T19:49:14.645 INFO:teuthology.orchestra.run.vm01.stdout:506 Cannot talk to daemon
2026-03-09T19:49:14.654 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon
2026-03-09T19:49:14.665 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-09T19:49:14.671 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-09T19:49:14.680 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-09T19:49:14.688 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-09T19:49:14.732 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:===============================================================================
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:^? node-4.infogral.is 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:^? stratum2-2.NTP.TechFak.N> 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:^? pve2.h4x-gamers.top 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.736 INFO:teuthology.orchestra.run.vm01.stdout:^? time.cloudflare.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.738 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:^? node-4.infogral.is 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:^? stratum2-2.NTP.TechFak.N> 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:^? pve2.h4x-gamers.top 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.741 INFO:teuthology.orchestra.run.vm06.stdout:^? time.cloudflare.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-09T19:49:14.741 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Cluster fsid is 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.101', 'mon.c': '[v2:192.168.123.101:3301,v1:192.168.123.101:6790]', 'mon.b': '192.168.123.106'}
2026-03-09T19:49:14.796 INFO:tasks.cephadm:First mon is mon.a on vm01
2026-03-09T19:49:14.796 INFO:tasks.cephadm:First mgr is y
2026-03-09T19:49:14.796 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-09T19:49:14.796 DEBUG:teuthology.orchestra.run.vm01:> sudo hostname $(hostname -s)
2026-03-09T19:49:14.838 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s)
2026-03-09T19:49:14.880 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)...
2026-03-09T19:49:14.881 DEBUG:teuthology.orchestra.run.vm01:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.171 INFO:teuthology.orchestra.run.vm01.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 9 19:49 /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.171 DEBUG:teuthology.orchestra.run.vm06:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.256 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 9 19:49 /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.256 DEBUG:teuthology.orchestra.run.vm01:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.279 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-09T19:49:15.305 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts...
2026-03-09T19:49:15.305 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-09T19:49:15.322 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-09T19:49:16.169 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-09T19:49:16.269 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout:{
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout: ]
2026-03-09T19:50:52.865 INFO:teuthology.orchestra.run.vm06.stdout:}
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: "repo_digests": [
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout: ]
2026-03-09T19:50:52.901 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T19:50:52.915 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /etc/ceph
2026-03-09T19:50:52.942 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph
2026-03-09T19:50:52.971 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 777 /etc/ceph
2026-03-09T19:50:53.007 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph
2026-03-09T19:50:53.037 INFO:tasks.cephadm:Writing seed config...
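Editor's note: the download step above fetches the standalone cephadm script pinned to the release tag and sanity-checks its size before marking it executable, which guards against an empty file or an HTML error page from the raw URL. A condensed sketch of that pattern, with the URL and size floor copied from the commands in the log:

    # Sketch: fetch the release-pinned cephadm script and verify it is
    # plausible before executing it, as the task does above.
    curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm \
        > /home/ubuntu/cephtest/cephadm
    test -s /home/ubuntu/cephtest/cephadm                          # non-empty
    test "$(stat -c%s /home/ubuntu/cephtest/cephadm)" -gt 1000     # not a stub/error page
    chmod +x /home/ubuntu/cephtest/cephadm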
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-09T19:50:53.037 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-09T19:50:53.037 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-09T19:50:53.037 DEBUG:teuthology.orchestra.run.vm01:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-09T19:50:53.062 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 0d36e446-1bf1-11f1-a427-9918c6d73ee3
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-09T19:50:53.062 DEBUG:teuthology.orchestra.run.vm01:mon.a> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service
2026-03-09T19:50:53.103 DEBUG:teuthology.orchestra.run.vm01:mgr.y> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service
2026-03-09T19:50:53.145 INFO:tasks.cephadm:Bootstrapping...
2026-03-09T19:50:53.145 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.101 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-09T19:50:53.285 INFO:teuthology.orchestra.run.vm01.stderr:--------------------------------------------------------------------------------
2026-03-09T19:50:53.285 INFO:teuthology.orchestra.run.vm01.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', '0d36e446-1bf1-11f1-a427-9918c6d73ee3', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.101', '--skip-admin-label']
2026-03-09T19:50:53.303 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: 5.8.0
2026-03-09T19:50:53.306 INFO:teuthology.orchestra.run.vm01.stderr:Verifying podman|docker is present...
2026-03-09T19:50:53.321 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: 5.8.0
2026-03-09T19:50:53.323 INFO:teuthology.orchestra.run.vm01.stderr:Verifying lvm2 is present...
2026-03-09T19:50:53.323 INFO:teuthology.orchestra.run.vm01.stderr:Verifying time synchronization is in place...
2026-03-09T19:50:53.330 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-09T19:50:53.336 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: inactive
2026-03-09T19:50:53.343 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: enabled
2026-03-09T19:50:53.349 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: active
2026-03-09T19:50:53.350 INFO:teuthology.orchestra.run.vm01.stderr:Unit chronyd.service is enabled and running
2026-03-09T19:50:53.350 INFO:teuthology.orchestra.run.vm01.stderr:Repeating the final host check...
2026-03-09T19:50:53.365 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: 5.8.0
2026-03-09T19:50:53.368 INFO:teuthology.orchestra.run.vm01.stderr:podman (/bin/podman) version 5.8.0 is present
2026-03-09T19:50:53.368 INFO:teuthology.orchestra.run.vm01.stderr:systemctl is present
2026-03-09T19:50:53.368 INFO:teuthology.orchestra.run.vm01.stderr:lvcreate is present
2026-03-09T19:50:53.374 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-09T19:50:53.380 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: inactive
2026-03-09T19:50:53.386 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: enabled
2026-03-09T19:50:53.392 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: active
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Unit chronyd.service is enabled and running
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Host looks OK
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Cluster fsid: 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Acquiring lock 139822140947520 on /run/cephadm/0d36e446-1bf1-11f1-a427-9918c6d73ee3.lock
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Lock 139822140947520 acquired on /run/cephadm/0d36e446-1bf1-11f1-a427-9918c6d73ee3.lock
2026-03-09T19:50:53.393 INFO:teuthology.orchestra.run.vm01.stderr:Verifying IP 192.168.123.101 port 3300 ...
2026-03-09T19:50:53.394 INFO:teuthology.orchestra.run.vm01.stderr:Verifying IP 192.168.123.101 port 6789 ...
2026-03-09T19:50:53.394 INFO:teuthology.orchestra.run.vm01.stderr:Base mon IP is 192.168.123.101, final addrv is [v2:192.168.123.101:3300,v1:192.168.123.101:6789]
2026-03-09T19:50:53.396 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.101 metric 100
2026-03-09T19:50:53.396 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.101 metric 100
2026-03-09T19:50:53.399 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium
2026-03-09T19:50:53.399 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-09T19:50:53.401 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-09T19:50:53.401 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: inet6 ::1/128 scope host
2026-03-09T19:50:53.401 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-09T19:50:53.401 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000
2026-03-09T19:50:53.401 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:1/64 scope link noprefixroute
2026-03-09T19:50:53.402 INFO:teuthology.orchestra.run.vm01.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-09T19:50:53.402 INFO:teuthology.orchestra.run.vm01.stderr:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24`
2026-03-09T19:50:53.402 INFO:teuthology.orchestra.run.vm01.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-09T19:50:53.403 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-09T19:50:53.422 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0...
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Getting image source signatures
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513
2026-03-09T19:50:54.779 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2
2026-03-09T19:50:54.780 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-09T19:50:54.782 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: Writing manifest to image destination
2026-03-09T19:50:54.786 INFO:teuthology.orchestra.run.vm01.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-09T19:50:54.906 INFO:teuthology.orchestra.run.vm01.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-09T19:50:54.935 INFO:teuthology.orchestra.run.vm01.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-09T19:50:54.935 INFO:teuthology.orchestra.run.vm01.stderr:Extracting ceph user uid/gid from container image...
2026-03-09T19:50:54.991 INFO:teuthology.orchestra.run.vm01.stderr:stat: 167 167
2026-03-09T19:50:55.027 INFO:teuthology.orchestra.run.vm01.stderr:Creating initial keys...
2026-03-09T19:50:55.104 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph-authtool: AQCfJK9pm7g1BhAAb15dKRK3rNGcOiTE7rB9YQ==
2026-03-09T19:50:55.188 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph-authtool: AQCfJK9pBsw6CxAA85WUgDA3V89a0r9Ni6CNNA==
2026-03-09T19:50:55.296 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph-authtool: AQCfJK9pIs2pERAAEGRKeNiPDCwCKF78Y/IjoQ==
2026-03-09T19:50:55.315 INFO:teuthology.orchestra.run.vm01.stderr:Creating initial monmap...
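Editor's note: the monmap creation that follows is done inside the container with monmaptool, seeding epoch 0 with the cluster fsid and the first mon's v2/v1 address vector. Roughly equivalent standalone usage (a sketch only; values taken from this run, flag spelling per recent monmaptool releases):

    # Sketch: build an initial monmap like the bootstrap step below.
    monmaptool --create --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 \
        --addv a '[v2:192.168.123.101:3300,v1:192.168.123.101:6789]' \
        /tmp/monmap
    # inspect the result
    monmaptool --print /tmp/monmap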
2026-03-09T19:50:55.406 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-09T19:50:55.406 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus
2026-03-09T19:50:55.406 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:50:55.406 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-09T19:50:55.428 INFO:teuthology.orchestra.run.vm01.stderr:monmaptool for a [v2:192.168.123.101:3300,v1:192.168.123.101:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-09T19:50:55.428 INFO:teuthology.orchestra.run.vm01.stderr:setting min_mon_release = octopus
2026-03-09T19:50:55.429 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: set fsid to 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:50:55.429 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-09T19:50:55.429 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-09T19:50:55.429 INFO:teuthology.orchestra.run.vm01.stderr:Creating mon...
2026-03-09T19:50:55.553 INFO:teuthology.orchestra.run.vm01.stderr:create mon.a on
2026-03-09T19:50:55.723 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-09T19:50:55.845 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target → /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target.
2026-03-09T19:50:55.845 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target → /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target.
2026-03-09T19:50:56.097 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Failed to reset failed state of unit ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service: Unit ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service not loaded.
2026-03-09T19:50:56.105 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Created symlink /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target.wants/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service → /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@.service.
2026-03-09T19:50:56.402 INFO:teuthology.orchestra.run.vm01.stderr:firewalld does not appear to be present
2026-03-09T19:50:56.402 INFO:teuthology.orchestra.run.vm01.stderr:Not possible to enable service . firewalld.service is not available
2026-03-09T19:50:56.402 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mon to start...
2026-03-09T19:50:56.402 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mon...
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: cluster:
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: id: 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: health: HEALTH_OK
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: services:
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon: 1 daemons, quorum a (age 0.168188s)
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mgr: no daemons active
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in
2026-03-09T19:50:56.601 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: data:
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: objects: 0 objects, 0 B
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: pgs:
2026-03-09T19:50:56.602 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.645 INFO:teuthology.orchestra.run.vm01.stderr:mon is available
2026-03-09T19:50:56.646 INFO:teuthology.orchestra.run.vm01.stderr:Assimilating anything we can from ceph.conf...
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [global]
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: fsid = 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789]
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [mgr]
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = False
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph:
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [osd]
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-09T19:50:56.982 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-09T19:50:57.030 INFO:teuthology.orchestra.run.vm01.stderr:Generating new minimal ceph.conf...
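Editor's note: the "Assimilating anything we can from ceph.conf..." step above folds the seed ceph.conf into the cluster's central config database, then a new minimal ceph.conf is written; the options echoed back (osd_sloppy_crc, osd_map_max_advance, etc.) are the ones that could not be stored centrally. A hedged sketch of doing the equivalent by hand with the standard CLI:

    # Sketch: move options from a flat ceph.conf into the mon config database,
    # as the bootstrap's assimilation step does above.
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # Options that could not be stored centrally are printed back; review them,
    # keep only the minimal stub (fsid + mon_host) in /etc/ceph/ceph.conf, and
    # verify the stored values:
    ceph config dump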
2026-03-09T19:50:57.222 INFO:teuthology.orchestra.run.vm01.stderr:Restarting the monitor... 2026-03-09T19:50:57.277 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 bash[49615]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon.a" found: no such container 2026-03-09T19:50:57.501 INFO:teuthology.orchestra.run.vm01.stderr:Setting mon public_network to 192.168.123.0/24 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[49426]: 2026-03-09T19:50:57.276+0000 7f795253a700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[49426]: 2026-03-09T19:50:57.276+0000 7f795253a700 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49623]: 2026-03-09 19:50:57.311378939 +0000 UTC m=+0.048870772 container died 966a809d093b8090a18654652614286103f955b8c39c95db046cb489e237b70b (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, release=754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, distribution-scope=public, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, GIT_BRANCH=HEAD, version=8, io.openshift.expose-services=, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49623]: 2026-03-09 19:50:57.326185732 +0000 UTC m=+0.063677565 container remove 966a809d093b8090a18654652614286103f955b8c39c95db046cb489e237b70b (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, ceph=True, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, architecture=x86_64, io.buildah.version=1.19.8, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, release=754, version=8, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 bash[49623]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 bash[49642]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon.a" found: no such container 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service: Deactivated successfully. 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 systemd[1]: Stopped Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 systemd[1]: Starting Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
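The two "Error: no container with name or ID ... found" lines above are expected noise during a managed restart: the generated unit's start/stop scripts first try to remove any stale container of the same name, and podman reports this error when there is nothing to remove. The exact script contents are an assumption here; the pattern is roughly:

    # Remove a possibly-stale container, tolerating 'no such container',
    # before starting a fresh one (note the older dotted name, mon.a, is tried)
    podman rm -f ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon.a || true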
2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49684]: 2026-03-09 19:50:57.455691197 +0000 UTC m=+0.017575668 container create dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, io.k8s.display-name=CentOS Stream 8, vcs-type=git, architecture=x86_64, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., GIT_CLEAN=True, version=8, io.buildah.version=1.19.8, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, ceph=True, name=centos-stream, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49684]: 2026-03-09 19:50:57.487026646 +0000 UTC m=+0.048911117 container init dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, io.k8s.display-name=CentOS Stream 8, vcs-type=git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., ceph=True, name=centos-stream, distribution-scope=public, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, maintainer=Guillaume Abrioux , RELEASE=HEAD, version=8, GIT_BRANCH=HEAD, architecture=x86_64, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, CEPH_POINT_RELEASE=-17.2.0, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49684]: 2026-03-09 19:50:57.489726383 +0000 UTC m=+0.051610854 container start dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, release=754, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., GIT_CLEAN=True, ceph=True, architecture=x86_64, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, com.redhat.component=centos-stream-container, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, GIT_BRANCH=HEAD, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, io.openshift.tags=base centos centos-stream) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 bash[49684]: dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 podman[49684]: 2026-03-09 19:50:57.448147646 +0000 UTC m=+0.010032117 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph:v17.2.0 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 systemd[1]: Started Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
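Each cephadm daemon is wrapped in a templated systemd unit named ceph-<fsid>@<daemon>.service, backed by a podman container named ceph-<fsid>-<daemon>, which is exactly the stop/remove/create/start cycle visible above. The same restart can be driven or inspected by hand:

    # Restart the mon through its systemd unit (what 'Restarting the monitor...' triggers)
    systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service
    # Inspect the backing container
    podman ps --filter name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a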
2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: pidfile_write: ignore empty --pid-file 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: load: jerasure load: lrc 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: RocksDB version: 6.15.5 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Compile date Apr 18 2022 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: DB SUMMARY 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: DB Session ID: H8SU0VU2CF57XL7GCLCR 2026-03-09T19:50:57.528 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: CURRENT file: CURRENT 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: MANIFEST file: MANIFEST-000009 size: 131 Bytes 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000010.log size: 73743 ; 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.error_if_exists: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.create_if_missing: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.paranoid_checks: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.env: 0x5616520c6860 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.fs: Posix File System 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.info_log: 0x561654613dc0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.statistics: (nil) 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.use_fsync: 0 2026-03-09T19:50:57.529 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_log_file_size: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_fallocate: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.use_direct_reads: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.db_log_dir: 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-a/store.db 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.write_buffer_manager: 0x561654704240 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 
19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.unordered_write: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.row_cache: None 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.wal_filter: None 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T19:50:57.529 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.preserve_deletes: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.two_write_queues: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.atomic_flush: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.log_readahead_size: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.file_checksum_gen_factory: Unknown 
2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_background_jobs: 2 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_background_compactions: -1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_subcompactions: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_open_files: -1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_background_flushes: -1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Compression algorithms supported: 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T19:50:57.530 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kZSTD supported: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kXpressCompression supported: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kLZ4Compression supported: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kBZip2Compression supported: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kZlibCompression supported: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: kSnappyCompression supported: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.merge_operator: 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_filter: None 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5616545dfd00) 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: pin_top_level_index_and_filter: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: index_type: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_index_type: 0 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: index_shortening: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T19:50:57.530 
INFO:journalctl@ceph.mon.a.vm01.stdout: hash_index_allow_collision: 1 2026-03-09T19:50:57.530 INFO:journalctl@ceph.mon.a.vm01.stdout: checksum: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: no_block_cache: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache: 0x56165464a170 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_name: BinnedLRUCache 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_options: 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: capacity : 536870912 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: num_shard_bits : 4 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: strict_capacity_limit : 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: high_pri_pool_ratio: 0.000 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_compressed: (nil) 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: persistent_cache: (nil) 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size: 4096 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size_deviation: 10 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_restart_interval: 16 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: index_block_restart_interval: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: metadata_block_size: 4096 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: partition_filters: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: use_delta_encoding: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: whole_key_filtering: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: verify_compression: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: read_amp_bytes_per_bit: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: format_version: 4 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_index_compression: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout: block_align: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression: NoCompression 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.num_levels: 7 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 
ceph-mon[49698]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T19:50:57.531 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T19:50:57.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.arena_block_size: 4194304 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T19:50:57.532 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.table_properties_collectors: 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.inplace_update_support: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.bloom_locality: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.max_successive_merges: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.ttl: 2592000 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.enable_blob_files: false 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.min_blob_size: 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: 
[db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 11, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 5 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/version_set.cc:4083] Creating manifest 13 2026-03-09T19:50:57.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773085857527341, "job": 1, "event": "recovery_started", "wal_files": [10]} 2026-03-09T19:50:57.728 INFO:teuthology.orchestra.run.vm01.stderr:Wrote config to /etc/ceph/ceph.conf 2026-03-09T19:50:57.729 INFO:teuthology.orchestra.run.vm01.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-09T19:50:57.729 INFO:teuthology.orchestra.run.vm01.stderr:Creating mgr... 2026-03-09T19:50:57.729 INFO:teuthology.orchestra.run.vm01.stderr:Verifying port 9283 ... 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #10 mode 2 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773085857532470, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 14, "file_size": 70715, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 69032, "index_size": 176, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9687, "raw_average_key_size": 49, "raw_value_size": 63601, "raw_average_value_size": 324, "num_data_blocks": 8, "num_entries": 196, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773085857, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "37bc990c-b1c3-41f6-9710-5d7af46b0042", "db_session_id": "H8SU0VU2CF57XL7GCLCR"}} 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/version_set.cc:4083] Creating manifest 15 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773085857534593, "job": 1, "event": "recovery_finished"} 2026-03-09T19:50:57.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [file/delete_scheduler.cc:73] Deleted file 
/var/lib/ceph/mon/ceph-a/store.db/000010.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x561654630700 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: DB pointer 0x5616546a4000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** DB Stats ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: L0 2/0 70.81 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Sum 2/0 70.81 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: User 
0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative compaction: 0.00 GB write, 2.78 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval compaction: 0.00 GB write, 2.78 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: L0 2/0 70.81 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Sum 2/0 70.81 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 13.7 0.00 0.00 1 0.005 0 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Flush(GB): 
cumulative 0.000, interval 0.000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative compaction: 0.00 GB write, 2.77 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T19:50:57.788 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: starting mon.a rank 0 at public addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] at bind addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???) e1 preinit fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mds e1 new map 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mds e1 print_map 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: e1 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: legacy client fscid: -1 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout: No filesystems configured 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T19:50:57.789 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mgr e0 loading version 1 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mgr e1 active server: (0) 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a@-1(???).mgr e1 mkfs or daemon transitioned to available, loading commands 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: monmap e1: 1 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0]} 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: fsmap 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: osdmap e1: 0 total, 0 up, 0 in 2026-03-09T19:50:57.789 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:57 vm01 ceph-mon[49698]: mgrmap e1: no daemons active 2026-03-09T19:50:57.875 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Failed to reset failed state of unit ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service: Unit ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service not loaded. 2026-03-09T19:50:57.883 INFO:teuthology.orchestra.run.vm01.stderr:systemctl: Created symlink /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3.target.wants/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service → /etc/systemd/system/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@.service. 2026-03-09T19:50:58.163 INFO:teuthology.orchestra.run.vm01.stderr:firewalld does not appear to be present 2026-03-09T19:50:58.163 INFO:teuthology.orchestra.run.vm01.stderr:Not possible to enable service . firewalld.service is not available 2026-03-09T19:50:58.163 INFO:teuthology.orchestra.run.vm01.stderr:firewalld does not appear to be present 2026-03-09T19:50:58.163 INFO:teuthology.orchestra.run.vm01.stderr:Not possible to open ports <[9283]>. firewalld.service is not available 2026-03-09T19:50:58.163 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mgr to start... 2026-03-09T19:50:58.164 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mgr... 
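With the mgr unit created and enabled (and firewall configuration skipped because firewalld is not installed on these VPS nodes, so port 9283 is left as-is), bootstrap now polls cluster status until the mgr map reports an active daemon; the "mgr not available, waiting (1/15)..." attempts visible below are that loop. An equivalent shell-level wait, illustrative rather than cephadm's actual implementation:

    # Poll until the mgr map reports an active mgr
    until ceph mgr dump | grep -q '"available": true'; do sleep 2; done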
2026-03-09T19:50:58.341 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsid": "0d36e446-1bf1-11f1-a427-9918c6d73ee3", 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "health": { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 0 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "a" 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_age": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T19:50:58.342 
INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T19:50:58.342 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": false, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "iostat", 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "nfs", 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "restful" 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modified": "2026-03-09T19:50:56.432391+0000", 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T19:50:58.343 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:50:58.377 INFO:teuthology.orchestra.run.vm01.stderr:mgr not available, waiting (1/15)... 2026-03-09T19:50:59.055 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:58 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/905589797' entity='client.admin' 2026-03-09T19:50:59.055 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:50:58 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/4166138070' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T19:50:59.055 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:58.714+0000 7f6a51c6a000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:50:59.055 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:59.053+0000 7f6a51c6a000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:50:59.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:59.197+0000 7f6a51c6a000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:50:59.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:59.247+0000 7f6a51c6a000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:50:59.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:59.417+0000 7f6a51c6a000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:51:00.249 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:50:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:50:59.996+0000 7f6a51c6a000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:51:00.249 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.185+0000 7f6a51c6a000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:00.507 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.248+0000 7f6a51c6a000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:51:00.507 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.313+0000 7f6a51c6a000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:00.507 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.378+0000 7f6a51c6a000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:51:00.507 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.447+0000 7f6a51c6a000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsid": "0d36e446-1bf1-11f1-a427-9918c6d73ee3", 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "health": { 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 
2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 0 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "a" 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_age": 3, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T19:51:00.592 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.593 
INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": false, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "iostat", 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "nfs", 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "restful" 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modified": "2026-03-09T19:50:56.432391+0000", 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T19:51:00.593 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:00.625 INFO:teuthology.orchestra.run.vm01.stderr:mgr not available, waiting (2/15)... 2026-03-09T19:51:00.791 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:00 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/3786899494' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T19:51:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.790+0000 7f6a51c6a000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:51:01.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:00.864+0000 7f6a51c6a000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:01.694 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.393+0000 7f6a51c6a000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:51:01.694 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.453+0000 7f6a51c6a000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:51:01.694 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.513+0000 7f6a51c6a000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:51:01.694 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.632+0000 7f6a51c6a000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:51:01.694 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.692+0000 7f6a51c6a000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:51:02.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.783+0000 7f6a51c6a000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:51:02.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:01.867+0000 7f6a51c6a000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:02.601 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:02.186+0000 7f6a51c6a000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:51:02.601 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:02.246+0000 7f6a51c6a000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsid": "0d36e446-1bf1-11f1-a427-9918c6d73ee3", 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "health": { 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 
2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 0 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "a" 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_age": 5, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T19:51:02.809 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T19:51:02.810 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.811 
INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": false, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "iostat", 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "nfs", 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "restful" 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modified": "2026-03-09T19:50:56.432391+0000", 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T19:51:02.811 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: Activating manager daemon y 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: mgrmap e2: y(active, starting, since 0.29922s) 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: Manager daemon y is now available 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' 2026-03-09T19:51:02.857 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:02 vm01 ceph-mon[49698]: from='mgr.14100 192.168.123.101:0/1575927011' entity='mgr.y' 2026-03-09T19:51:02.857 INFO:teuthology.orchestra.run.vm01.stderr:mgr not available, waiting (3/15)... 2026-03-09T19:51:03.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:03 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1003348185' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T19:51:03.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:03 vm01 ceph-mon[49698]: mgrmap e3: y(active, since 1.30719s) 2026-03-09T19:51:05.116 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsid": "0d36e446-1bf1-11f1-a427-9918c6d73ee3", 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "health": { 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 0 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "a" 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "quorum_age": 7, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T19:51:05.118 
INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T19:51:05.118 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": true, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "iostat", 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "nfs", 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "restful" 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ], 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "modified": "2026-03-09T19:50:56.432391+0000", 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "services": {} 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: }, 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T19:51:05.119 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:05.163 INFO:teuthology.orchestra.run.vm01.stderr:mgr is available 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 
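With the mgr reported available, bootstrap next pushes a small ceph.conf fragment into the monitor's configuration database: the [global]/[mgr]/[osd] block echoed below is that fragment, and the "config assimilate-conf" dispatch that follows is the call that stores it. (The recurring "Module ... has missing NOTIFY_TYPES member" lines from mgr.y are warnings about python modules that do not declare a NOTIFY_TYPES attribute; the modules still load in this run, so they appear to be harmless noise rather than errors.) A rough sketch of the same step done by hand, with the file name and the trimmed-down contents being assumptions for illustration:

    # feed an ini-style conf fragment into the cluster configuration database
    cat > /tmp/bootstrap.conf <<'EOF'
    [global]
            osd_crush_chooseleaf_type = 0
    [mgr]
            mgr/telemetry/nag = false
    EOF
    ceph config assimilate-conf -i /tmp/bootstrap.conf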
2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [global] 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: fsid = 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [mgr] 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: [osd] 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-09T19:51:05.398 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-09T19:51:05.449 INFO:teuthology.orchestra.run.vm01.stderr:Enabling cephadm module... 2026-03-09T19:51:05.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:05 vm01 ceph-mon[49698]: mgrmap e4: y(active, since 2s) 2026-03-09T19:51:05.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:05 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/518645429' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T19:51:05.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:05 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1504135980' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-09T19:51:05.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:05 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1504135980' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-09T19:51:06.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:06 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/756684305' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-09T19:51:06.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setuser ceph since I am not root 2026-03-09T19:51:06.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setgroup ceph since I am not root 2026-03-09T19:51:06.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:06.769+0000 7ff84ab19000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:51:06.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:06.824+0000 7ff84ab19000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": true, 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-09T19:51:06.932 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:07.244 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for the mgr to restart... 2026-03-09T19:51:07.244 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mgr epoch 5... 2026-03-09T19:51:07.613 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:07.240+0000 7ff84ab19000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:51:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:07 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/756684305' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-09T19:51:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:07 vm01 ceph-mon[49698]: mgrmap e5: y(active, since 4s) 2026-03-09T19:51:07.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:07 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/445379014' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T19:51:07.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:07.650+0000 7ff84ab19000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:51:07.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:07.796+0000 7ff84ab19000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:51:07.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:07.850+0000 7ff84ab19000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:51:08.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.022+0000 7ff84ab19000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:51:08.872 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.619+0000 7ff84ab19000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:51:08.873 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.811+0000 7ff84ab19000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:09.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.871+0000 7ff84ab19000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:51:09.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.928+0000 7ff84ab19000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:09.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:08.990+0000 7ff84ab19000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:51:09.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:09.045+0000 7ff84ab19000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:51:09.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:09.348+0000 7ff84ab19000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:51:09.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:09.422+0000 7ff84ab19000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:10.271 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:09.968+0000 7ff84ab19000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:51:10.271 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.027+0000 7ff84ab19000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:51:10.271 
INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.088+0000 7ff84ab19000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:51:10.271 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.210+0000 7ff84ab19000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:51:10.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.270+0000 7ff84ab19000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:51:10.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.370+0000 7ff84ab19000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:51:10.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.464+0000 7ff84ab19000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:11.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.774+0000 7ff84ab19000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:51:11.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:10.835+0000 7ff84ab19000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:51:11.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:10 vm01 ceph-mon[49698]: Active manager daemon y restarted 2026-03-09T19:51:11.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:10 vm01 ceph-mon[49698]: Activating manager daemon y 2026-03-09T19:51:11.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:10 vm01 ceph-mon[49698]: osdmap e2: 0 total, 0 up, 0 in 2026-03-09T19:51:11.919 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:11 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:11] ENGINE Bus STARTING 2026-03-09T19:51:11.919 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:11 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:11] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T19:51:11.919 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:11 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:11] ENGINE Bus STARTED 2026-03-09T19:51:11.924 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:11.924 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-09T19:51:11.924 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "initialized": true 2026-03-09T19:51:11.924 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:11.950 INFO:teuthology.orchestra.run.vm01.stderr:mgr epoch 5 is available 2026-03-09T19:51:11.950 INFO:teuthology.orchestra.run.vm01.stderr:Setting orchestrator backend to cephadm... 
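The bootstrap then turns on the orchestrator: it enables the cephadm mgr module, waits for the mgr to restart with the module loaded (the "Waiting for mgr epoch ..." checks), and points the orchestrator backend at cephadm. The equivalent CLI calls, all of which appear as monitor dispatches in this log, are roughly:

    ceph mgr module enable cephadm    # load the cephadm orchestrator module
    ceph mgr stat                     # confirm the new mgrmap epoch after the mgr restart
    ceph orch set backend cephadm     # route 'ceph orch ...' commands to cephadm
    ceph cephadm set-user root        # have cephadm ssh to hosts as root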
2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: mgrmap e6: y(active, starting, since 0.0542844s) 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: Manager daemon y is now available 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:11] ENGINE Bus STARTING 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:12.208 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:11 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:12.467 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: value unchanged 2026-03-09T19:51:12.516 
INFO:teuthology.orchestra.run.vm01.stderr:Generating ssh key... 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:11] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:11] ENGINE Bus STARTED 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: mgrmap e7: y(active, since 1.06111s) 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:13.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:12 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: Generating public/private rsa key pair. 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: Your identification has been saved in /tmp/tmpw9kbsa7x/key. 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: Your public key has been saved in /tmp/tmpw9kbsa7x/key.pub. 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: The key fingerprint is: 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: SHA256:0T4PjLpA1lSoDafUzf8enRnuAqtkS8zUi1lNkhoLzYU ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: The key's randomart image is: 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: +---[RSA 3072]----+ 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | . +... 
| 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | o ++Eo . | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | . *o =.+ . | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | oo.. X.+ . | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | o . S B..o + | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | o = +.=o = | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | . . O .+.o | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | . = .. o . | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: | . o. . | 2026-03-09T19:51:13.053 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: +----[SHA256]-----+ 2026-03-09T19:51:13.053 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03G+/s7YYQ1P83m4DX9GPOO9cXsF4ebzZYrXpv4PIu7XVhIcBCBq97TiuPf3vXtyJjayagXbYVn7hyno5G6G2BPXW7BLmgDMfLvLTaNku+oOmcOKGMys8ULjv68rx1OUcX/KDlZPOEoplxwVO9X6S5+WdYHGRV7hAufJGOGBBj0tRpbYRDgNry/ckZCpsOTrFoipDBZuqZN8RNB+nST0q8fHQ6WtCClIxHSuVJcsuiHTpPdx23k92HFXhJFGW2dprIr8NzVJ9mpO0OFBJcLw6JHZDmeb5TyRh/wktCSq3vft6HYHZK9iCLlYFAjTVv8HZQQsFY1OTOPTTWMar+xxXga7zT9Fv6zSGXYhXaKsoLmDWZUDe10qRk3PsLMYuRwiEqeXVyvZgTMZCXtsDIaoowXddPb7RovH3wthOgVb9FwHb5xMKIvurBVqkseTSGDdNRkPXmdIDRY+Jx5+wux6JEFkQxdWXOxY+qwjc2mfId013UsCq9iN7Y4MJz6V1KxE= ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:13.091 INFO:teuthology.orchestra.run.vm01.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-09T19:51:13.091 INFO:teuthology.orchestra.run.vm01.stderr:Adding key to root@localhost authorized_keys... 2026-03-09T19:51:13.091 INFO:teuthology.orchestra.run.vm01.stderr:Adding host vm01... 2026-03-09T19:51:13.992 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: Added host 'vm01' with addr '192.168.123.101' 2026-03-09T19:51:14.024 INFO:teuthology.orchestra.run.vm01.stderr:Deploying unmanaged mon service... 2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: Generating ssh key... 
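Next cephadm establishes its ssh access and registers the first host: it generates a cluster ssh key, writes the public half to ceph.pub, appends it to root's authorized_keys, and adds vm01 to the inventory; the entries that follow also show unmanaged mon and mgr service specs being saved so the orchestrator will not reschedule the bootstrap daemons. Approximately the same sequence by hand (the authorized_keys path is the usual default, not quoted from this run):

    ceph cephadm generate-key                       # create the cluster ssh keypair
    ceph cephadm get-pub-key > ceph.pub             # export the public key
    cat ceph.pub >> /root/.ssh/authorized_keys      # allow cephadm to ssh to this host
    ceph orch host add vm01 192.168.123.101         # register the host with the orchestrator
    ceph orch apply mon --unmanaged                 # keep the bootstrap mon out of scheduling
    ceph orch apply mgr --unmanaged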
2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "addr": "192.168.123.101", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: Deploying cephadm binary to vm01 2026-03-09T19:51:14.294 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:13 vm01 ceph-mon[49698]: mgrmap e8: y(active, since 2s) 2026-03-09T19:51:14.295 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-09T19:51:14.330 INFO:teuthology.orchestra.run.vm01.stderr:Deploying unmanaged mgr service... 2026-03-09T19:51:14.573 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-09T19:51:15.120 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:15.120 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: Added host vm01 2026-03-09T19:51:15.120 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:15.120 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: Saving service mon spec with placement count:5 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: Saving service mgr spec with placement count:2 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:15.121 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:14 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/849728227' entity='client.admin' 2026-03-09T19:51:15.139 INFO:teuthology.orchestra.run.vm01.stderr:Enabling the dashboard module... 2026-03-09T19:51:16.377 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:16 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3175389667' entity='client.admin' 2026-03-09T19:51:16.377 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:16 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/2360112575' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-09T19:51:16.377 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:16 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:16.377 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:16 vm01 ceph-mon[49698]: from='mgr.14120 192.168.123.101:0/1724563487' entity='mgr.y' 2026-03-09T19:51:16.377 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setuser ceph since I am not root 2026-03-09T19:51:16.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setgroup ceph since I am not root 2026-03-09T19:51:16.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:16.318+0000 7f0b1cb6a000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:51:16.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:16.375+0000 7f0b1cb6a000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "available": true, 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-09T19:51:16.474 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:16.515 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for the mgr to restart... 2026-03-09T19:51:16.515 INFO:teuthology.orchestra.run.vm01.stderr:Waiting for mgr epoch 9... 2026-03-09T19:51:17.110 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:16.786+0000 7f0b1cb6a000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:51:17.361 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:17.164+0000 7f0b1cb6a000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:51:17.361 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:17.303+0000 7f0b1cb6a000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:51:17.362 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:17 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2360112575' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-09T19:51:17.362 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:17 vm01 ceph-mon[49698]: mgrmap e9: y(active, since 5s) 2026-03-09T19:51:17.362 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:17 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/2222234183' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T19:51:17.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:17.360+0000 7f0b1cb6a000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:51:17.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:17.542+0000 7f0b1cb6a000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:51:18.393 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.137+0000 7f0b1cb6a000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:51:18.393 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.329+0000 7f0b1cb6a000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:18.873 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.391+0000 7f0b1cb6a000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:51:18.873 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.448+0000 7f0b1cb6a000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:18.873 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.510+0000 7f0b1cb6a000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:51:18.873 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.565+0000 7f0b1cb6a000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:51:19.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.871+0000 7f0b1cb6a000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:51:19.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:18.942+0000 7f0b1cb6a000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:51:19.760 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.505+0000 7f0b1cb6a000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:51:19.760 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.568+0000 7f0b1cb6a000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:51:19.761 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.632+0000 7f0b1cb6a000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:51:20.020 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.759+0000 7f0b1cb6a000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:51:20.020 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 
19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.823+0000 7f0b1cb6a000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:51:20.020 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:19.926+0000 7f0b1cb6a000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:51:20.336 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:20.018+0000 7f0b1cb6a000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:51:20.587 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:20.334+0000 7f0b1cb6a000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:51:20.587 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:51:20.397+0000 7f0b1cb6a000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:51:20.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:20 vm01 ceph-mon[49698]: Active manager daemon y restarted 2026-03-09T19:51:20.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:20 vm01 ceph-mon[49698]: Activating manager daemon y 2026-03-09T19:51:20.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:20] ENGINE Bus STARTING 2026-03-09T19:51:21.238 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:20] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T19:51:21.238 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:51:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:51:20] ENGINE Bus STARTED 2026-03-09T19:51:21.542 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: { 2026-03-09T19:51:21.542 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-09T19:51:21.542 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: "initialized": true 2026-03-09T19:51:21.542 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: } 2026-03-09T19:51:21.585 INFO:teuthology.orchestra.run.vm01.stderr:mgr epoch 9 is available 2026-03-09T19:51:21.585 INFO:teuthology.orchestra.run.vm01.stderr:Generating a dashboard self-signed certificate... 
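[editor's note] After enabling the dashboard module, the bootstrap waits for the active mgr to restart by polling "ceph mgr stat" until the mgrmap epoch advances ("Waiting for mgr epoch 9..." above, answered by the JSON carrying "mgrmap_epoch"). A sketch of that wait loop under the same CLI assumption; the helper names are hypothetical, not teuthology's own:

    import json, subprocess, time

    def mgr_epoch() -> int:
        # "ceph mgr stat" prints JSON including mgrmap_epoch, as seen in the log.
        out = subprocess.run(["ceph", "mgr", "stat"], check=True,
                             capture_output=True, text=True).stdout
        return json.loads(out)["mgrmap_epoch"]

    def wait_for_mgr_epoch(target: int, timeout: float = 300.0) -> None:
        deadline = time.monotonic() + timeout
        while mgr_epoch() < target:
            if time.monotonic() > deadline:
                raise TimeoutError(f"mgr did not reach epoch {target}")
            time.sleep(1)

    wait_for_mgr_epoch(9)   # the run above waits for epoch 9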
2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: osdmap e3: 0 total, 0 up, 0 in 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: mgrmap e10: y(active, starting, since 0.119212s) 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: Manager daemon y is now available 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:20] ENGINE Bus STARTING 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:20] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: [09/Mar/2026:19:51:20] ENGINE Bus STARTED 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:21.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:21 vm01 ceph-mon[49698]: mgrmap e11: y(active, since 1.12952s) 2026-03-09T19:51:22.057 
INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-09T19:51:22.093 INFO:teuthology.orchestra.run.vm01.stderr:Creating initial admin user... 2026-03-09T19:51:22.513 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$nIeVZ1HtQgRGkeT9JdXOC.KIK9hoLZeqGH1g8jjtMmDx4u78SuWIS", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773085882, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-09T19:51:22.543 INFO:teuthology.orchestra.run.vm01.stderr:Fetching dashboard port number... 2026-03-09T19:51:22.785 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: 8443 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:23.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:23 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/505303162' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr:firewalld does not appear to be present 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr:Ceph Dashboard is now available at: 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr: URL: https://vm01.local:8443/ 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr: User: admin 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr: Password: ajcrjuwrn6 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.132 INFO:teuthology.orchestra.run.vm01.stderr:Enabling autotune for osd_memory_target 2026-03-09T19:51:23.700 INFO:teuthology.orchestra.run.vm01.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr:You can access the Ceph CLI with: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr:Please consider enabling telemetry to help improve Ceph: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: ceph telemetry on 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr:For more information see: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/ 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T19:51:23.729 INFO:teuthology.orchestra.run.vm01.stderr:Bootstrap complete. 2026-03-09T19:51:23.760 INFO:tasks.cephadm:Fetching config... 2026-03-09T19:51:23.761 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:23.761 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-09T19:51:23.799 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-09T19:51:23.799 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:23.799 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-09T19:51:23.859 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-09T19:51:23.860 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:23.860 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/keyring of=/dev/stdout 2026-03-09T19:51:23.927 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-09T19:51:23.927 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:23.927 DEBUG:teuthology.orchestra.run.vm01:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-09T19:51:24.002 INFO:tasks.cephadm:Installing pub ssh key for root users... 
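[editor's note] Bootstrap is complete; the task now copies the cluster conf, admin keyring, mon keyring and public SSH key off the node (the dd pipelines above) and, in the entries that follow, appends that key to root's authorized_keys on each target. A local Python sketch of the authorized_keys install, equivalent to the install/tee/chmod pipeline shown below (paths from the log; assumes it runs as root, a simplification of the remote execution teuthology actually performs):

    import pathlib

    pub_key = pathlib.Path("/home/ubuntu/cephtest/ceph.pub").read_text().strip()

    ssh_dir = pathlib.Path("/root/.ssh")
    ssh_dir.mkdir(mode=0o700, exist_ok=True)        # install -d -m 0700 /root/.ssh
    auth = ssh_dir / "authorized_keys"
    with auth.open("a") as f:                       # tee -a authorized_keys
        f.write(pub_key + "\n")
    auth.chmod(0o600)                               # chmod 0600 authorized_keys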
2026-03-09T19:51:24.002 DEBUG:teuthology.orchestra.run.vm01:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03G+/s7YYQ1P83m4DX9GPOO9cXsF4ebzZYrXpv4PIu7XVhIcBCBq97TiuPf3vXtyJjayagXbYVn7hyno5G6G2BPXW7BLmgDMfLvLTaNku+oOmcOKGMys8ULjv68rx1OUcX/KDlZPOEoplxwVO9X6S5+WdYHGRV7hAufJGOGBBj0tRpbYRDgNry/ckZCpsOTrFoipDBZuqZN8RNB+nST0q8fHQ6WtCClIxHSuVJcsuiHTpPdx23k92HFXhJFGW2dprIr8NzVJ9mpO0OFBJcLw6JHZDmeb5TyRh/wktCSq3vft6HYHZK9iCLlYFAjTVv8HZQQsFY1OTOPTTWMar+xxXga7zT9Fv6zSGXYhXaKsoLmDWZUDe10qRk3PsLMYuRwiEqeXVyvZgTMZCXtsDIaoowXddPb7RovH3wthOgVb9FwHb5xMKIvurBVqkseTSGDdNRkPXmdIDRY+Jx5+wux6JEFkQxdWXOxY+qwjc2mfId013UsCq9iN7Y4MJz6V1KxE= ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T19:51:24.084 INFO:teuthology.orchestra.run.vm01.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03G+/s7YYQ1P83m4DX9GPOO9cXsF4ebzZYrXpv4PIu7XVhIcBCBq97TiuPf3vXtyJjayagXbYVn7hyno5G6G2BPXW7BLmgDMfLvLTaNku+oOmcOKGMys8ULjv68rx1OUcX/KDlZPOEoplxwVO9X6S5+WdYHGRV7hAufJGOGBBj0tRpbYRDgNry/ckZCpsOTrFoipDBZuqZN8RNB+nST0q8fHQ6WtCClIxHSuVJcsuiHTpPdx23k92HFXhJFGW2dprIr8NzVJ9mpO0OFBJcLw6JHZDmeb5TyRh/wktCSq3vft6HYHZK9iCLlYFAjTVv8HZQQsFY1OTOPTTWMar+xxXga7zT9Fv6zSGXYhXaKsoLmDWZUDe10qRk3PsLMYuRwiEqeXVyvZgTMZCXtsDIaoowXddPb7RovH3wthOgVb9FwHb5xMKIvurBVqkseTSGDdNRkPXmdIDRY+Jx5+wux6JEFkQxdWXOxY+qwjc2mfId013UsCq9iN7Y4MJz6V1KxE= ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:24.098 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03G+/s7YYQ1P83m4DX9GPOO9cXsF4ebzZYrXpv4PIu7XVhIcBCBq97TiuPf3vXtyJjayagXbYVn7hyno5G6G2BPXW7BLmgDMfLvLTaNku+oOmcOKGMys8ULjv68rx1OUcX/KDlZPOEoplxwVO9X6S5+WdYHGRV7hAufJGOGBBj0tRpbYRDgNry/ckZCpsOTrFoipDBZuqZN8RNB+nST0q8fHQ6WtCClIxHSuVJcsuiHTpPdx23k92HFXhJFGW2dprIr8NzVJ9mpO0OFBJcLw6JHZDmeb5TyRh/wktCSq3vft6HYHZK9iCLlYFAjTVv8HZQQsFY1OTOPTTWMar+xxXga7zT9Fv6zSGXYhXaKsoLmDWZUDe10qRk3PsLMYuRwiEqeXVyvZgTMZCXtsDIaoowXddPb7RovH3wthOgVb9FwHb5xMKIvurBVqkseTSGDdNRkPXmdIDRY+Jx5+wux6JEFkQxdWXOxY+qwjc2mfId013UsCq9iN7Y4MJz6V1KxE= ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T19:51:24.134 INFO:teuthology.orchestra.run.vm06.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03G+/s7YYQ1P83m4DX9GPOO9cXsF4ebzZYrXpv4PIu7XVhIcBCBq97TiuPf3vXtyJjayagXbYVn7hyno5G6G2BPXW7BLmgDMfLvLTaNku+oOmcOKGMys8ULjv68rx1OUcX/KDlZPOEoplxwVO9X6S5+WdYHGRV7hAufJGOGBBj0tRpbYRDgNry/ckZCpsOTrFoipDBZuqZN8RNB+nST0q8fHQ6WtCClIxHSuVJcsuiHTpPdx23k92HFXhJFGW2dprIr8NzVJ9mpO0OFBJcLw6JHZDmeb5TyRh/wktCSq3vft6HYHZK9iCLlYFAjTVv8HZQQsFY1OTOPTTWMar+xxXga7zT9Fv6zSGXYhXaKsoLmDWZUDe10qRk3PsLMYuRwiEqeXVyvZgTMZCXtsDIaoowXddPb7RovH3wthOgVb9FwHb5xMKIvurBVqkseTSGDdNRkPXmdIDRY+Jx5+wux6JEFkQxdWXOxY+qwjc2mfId013UsCq9iN7Y4MJz6V1KxE= ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:24.145 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-09T19:51:24.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:24 vm01 ceph-mon[49698]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:24.379 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:24 vm01 ceph-mon[49698]: mgrmap e12: y(active, since 2s) 2026-03-09T19:51:24.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:24 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/594689094' entity='client.admin' 2026-03-09T19:51:25.784 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-09T19:51:25.784 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2160096466' entity='client.admin' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:26.618 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:26 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:26.843 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm06 2026-03-09T19:51:26.843 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T19:51:26.843 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.conf 2026-03-09T19:51:26.859 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T19:51:26.860 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:26.919 INFO:tasks.cephadm:Adding host vm06 to orchestrator... 
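[editor's note] With the conf and keyring staged on vm06, the lines that follow register the host with the orchestrator, verify the host list, apply the explicit three-mon placement, and poll "ceph mon dump -f json" until monmap e3 shows mons a, b and c in quorum, before emitting a minimal ceph.conf at the end of this section. A sketch of that sequence, assuming the same CLI as the earlier sketches (commands and the placement string are copied from the log; this is not the teuthology implementation itself):

    import json, subprocess, time

    def ceph(*args: str) -> str:
        # Same hypothetical helper as in the earlier sketch.
        return subprocess.run(["ceph", *args], check=True,
                              capture_output=True, text=True).stdout

    ceph("orch", "host", "add", "vm06")                       # "Added host 'vm06' with addr ..."
    hosts = json.loads(ceph("orch", "host", "ls", "--format=json"))
    assert {h["hostname"] for h in hosts} == {"vm01", "vm06"}

    ceph("orch", "apply", "mon",
         "3;vm01:192.168.123.101=a;"
         "vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;"
         "vm06:192.168.123.106=b")                            # "Scheduled mon update..."

    # "Waiting for 3 mons in monmap..." -- poll the mon dump until all three appear.
    while len(json.loads(ceph("mon", "dump", "-f", "json"))["mons"]) < 3:
        time.sleep(5)

    print(ceph("config", "generate-minimal-conf"))            # [global] fsid/mon_host, as below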
2026-03-09T19:51:26.919 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch host add vm06 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:27 vm01 ceph-mon[49698]: mgrmap e13: y(active, since 7s) 2026-03-09T19:51:28.472 INFO:teuthology.orchestra.run.vm01.stdout:Added host 'vm06' with addr '192.168.123.106' 2026-03-09T19:51:28.516 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch host ls --format=json 2026-03-09T19:51:29.115 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:51:29.116 INFO:teuthology.orchestra.run.vm01.stdout:[{"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}, {"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}] 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: Deploying cephadm binary to vm06 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:29.480 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-09T19:51:29.480 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell 
-c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd crush tunables default 2026-03-09T19:51:30.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:30 vm01 ceph-mon[49698]: Added host vm06 2026-03-09T19:51:30.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:30 vm01 ceph-mon[49698]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:51:30.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:30 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:30.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:30 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:30.355 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:30 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1914699157' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-09T19:51:30.490 INFO:teuthology.orchestra.run.vm01.stderr:adjusted tunables profile to default 2026-03-09T19:51:30.587 INFO:tasks.cephadm:Adding mon.a on vm01 2026-03-09T19:51:30.587 INFO:tasks.cephadm:Adding mon.c on vm01 2026-03-09T19:51:30.587 INFO:tasks.cephadm:Adding mon.b on vm06 2026-03-09T19:51:30.587 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply mon '3;vm01:192.168.123.101=a;vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;vm06:192.168.123.106=b' 2026-03-09T19:51:31.124 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled mon update... 2026-03-09T19:51:31.186 DEBUG:teuthology.orchestra.run.vm01:mon.c> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.c.service 2026-03-09T19:51:31.188 DEBUG:teuthology.orchestra.run.vm06:mon.b> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.b.service 2026-03-09T19:51:31.189 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-09T19:51:31.189 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph mon dump -f json 2026-03-09T19:51:31.486 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/1914699157' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-09T19:51:31.486 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:31.750 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:51:31.750 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"0d36e446-1bf1-11f1-a427-9918c6d73ee3","modified":"2026-03-09T19:50:55.405959Z","created":"2026-03-09T19:50:55.405959Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T19:51:31.750 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm01:192.168.123.101=a;vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;vm06:192.168.123.106=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: Saving service mon spec with placement vm01:192.168.123.101=a;vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;vm06:192.168.123.106=b;count:3 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:31.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:31 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:32.736 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: Deploying daemon mon.c on vm01 2026-03-09T19:51:32.736 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/2847371904' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T19:51:32.801 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-09T19:51:32.801 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph mon dump -f json 2026-03-09T19:51:33.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:32 vm01 ceph-mon[53908]: mon.c@-1(synchronizing) e1 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-09T19:51:34.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:34 vm06 ceph-mon[48187]: mon.b@-1(synchronizing).paxosservice(auth 1..3) refresh upgraded, format 0 -> 3 2026-03-09T19:51:38.417 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: Deploying daemon mon.b on vm06 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: mon.a calling monitor election 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: mon.c calling monitor election 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: 
from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: monmap e2: 2 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: fsmap 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: mgrmap e13: y(active, since 17s) 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: overall HEALTH_OK 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: Deploying daemon mon.b on vm06 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 
vm01 ceph-mon[49698]: mon.a calling monitor election 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: mon.c calling monitor election 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: monmap e2: 2 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: fsmap 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: mgrmap e13: y(active, since 17s) 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: overall HEALTH_OK 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:38.418 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: mon.a calling monitor election 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: mon.c calling monitor election 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: mon.b calling monitor election 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: monmap e3: 3 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: fsmap 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: mgrmap e13: y(active, since 
22s) 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: overall HEALTH_OK 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:43.590 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: mon.a calling monitor election 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: mon.c calling monitor election 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: mon.b calling monitor election 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 
vm01 ceph-mon[49698]: monmap e3: 3 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: fsmap 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: mgrmap e13: y(active, since 22s) 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: overall HEALTH_OK 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:43.591 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: Deploying daemon mon.b on vm06 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.a calling monitor election 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.c calling monitor election 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon 
metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: monmap e2: 2 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: fsmap 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mgrmap e13: y(active, since 17s) 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: overall HEALTH_OK 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.a calling monitor election 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.c calling monitor election 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.b calling monitor election 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: monmap e3: 3 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: fsmap 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: mgrmap e13: y(active, since 22s) 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: overall HEALTH_OK 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:43.749 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:51:43.749 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":3,"fsid":"0d36e446-1bf1-11f1-a427-9918c6d73ee3","modified":"2026-03-09T19:51:38.174339Z","created":"2026-03-09T19:50:55.405959Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: 
":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3301","nonce":0},{"type":"v1","addr":"192.168.123.101:6790","nonce":0}]},"addr":"192.168.123.101:6790/0","public_addr":"192.168.123.101:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1,2]} 2026-03-09T19:51:43.752 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 3 2026-03-09T19:51:43.803 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-09T19:51:43.803 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph config generate-minimal-conf 2026-03-09T19:51:44.336 INFO:teuthology.orchestra.run.vm01.stdout:# minimal ceph.conf for 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:44.337 INFO:teuthology.orchestra.run.vm01.stdout:[global] 2026-03-09T19:51:44.337 INFO:teuthology.orchestra.run.vm01.stdout: fsid = 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:51:44.337 INFO:teuthology.orchestra.run.vm01.stdout: mon_host = [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring mon.a (unknown last config time)... 
2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring daemon mon.a on vm01 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring mon.c (monmap changed)... 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring daemon mon.c on vm01 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/843526378' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring mon.b (monmap changed)... 
2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: Reconfiguring daemon mon.b on vm06 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/455760848' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring mon.a (unknown last config time)... 
2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring daemon mon.a on vm01 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring mon.c (monmap changed)... 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring daemon mon.c on vm01 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/843526378' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.405 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring mon.b (monmap changed)... 
2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: Reconfiguring daemon mon.b on vm06 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:44.406 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:44 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/455760848' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.406 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-09T19:51:44.406 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:44.406 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T19:51:44.438 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:44.438 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:44.510 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T19:51:44.510 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring mon.a (unknown last config time)... 
2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring daemon mon.a on vm01 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring mon.c (monmap changed)... 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring daemon mon.c on vm01 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/843526378' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring mon.b (monmap changed)... 
2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: Reconfiguring daemon mon.b on vm06 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:44.536 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:44 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/455760848' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:44.539 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T19:51:44.539 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:51:44.608 INFO:tasks.cephadm:Adding mgr.y on vm01 2026-03-09T19:51:44.608 INFO:tasks.cephadm:Adding mgr.x on vm06 2026-03-09T19:51:44.608 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply mgr '2;vm01=y;vm06=x' 2026-03-09T19:51:45.105 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled mgr update... 2026-03-09T19:51:45.177 DEBUG:teuthology.orchestra.run.vm06:mgr.x> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service 2026-03-09T19:51:45.179 INFO:tasks.cephadm:Deploying OSDs... 2026-03-09T19:51:45.179 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-09T19:51:45.179 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T19:51:45.194 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T19:51:45.194 DEBUG:teuthology.orchestra.run.vm01:> ls /dev/[sv]d? 
2026-03-09T19:51:45.249 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vda 2026-03-09T19:51:45.249 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdb 2026-03-09T19:51:45.249 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdc 2026-03-09T19:51:45.249 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdd 2026-03-09T19:51:45.249 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vde 2026-03-09T19:51:45.250 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T19:51:45.250 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T19:51:45.250 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdb 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdb 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 226 Links: 1 Device type: fc,10 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-09 19:51:24.217126654 +0000 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-09 19:51:23.985130372 +0000 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-09 19:51:23.985130372 +0000 2026-03-09T19:51:45.306 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-09 19:46:06.244000000 +0000 2026-03-09T19:51:45.306 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T19:51:45.372 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-09T19:51:45.372 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-09T19:51:45.372 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000187821 s, 2.7 MB/s 2026-03-09T19:51:45.373 DEBUG:teuthology.orchestra.run.vm01:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T19:51:45.429 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdc 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdc 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-09 19:51:24.278125676 +0000 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-09 19:51:23.992130260 +0000 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-09 19:51:23.992130260 +0000 2026-03-09T19:51:45.486 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-09 19:46:06.256000000 +0000 2026-03-09T19:51:45.486 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T19:51:45.552 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-09T19:51:45.552 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-09T19:51:45.552 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000212768 s, 2.4 MB/s 2026-03-09T19:51:45.553 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T19:51:45.611 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdd 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm01=y;vm06=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: Saving service mgr spec with placement vm01=y;vm06=x;count:2 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.625 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:45 vm06 ceph-mon[48187]: Deploying daemon mgr.x on vm06 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm01=y;vm06=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: Saving service mgr spec with placement vm01=y;vm06=x;count:2 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[49698]: Deploying daemon mgr.x on vm06 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm01=y;vm06=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: Saving service mgr spec with placement vm01=y;vm06=x;count:2 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.630 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:51:45.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:45 vm01 ceph-mon[53908]: Deploying daemon mgr.x on vm06 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdd 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-09 19:51:24.339124699 +0000 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-09 19:51:23.997130180 +0000 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-09 19:51:23.997130180 +0000 2026-03-09T19:51:45.646 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-09 19:46:06.262000000 +0000 2026-03-09T19:51:45.646 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T19:51:45.710 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-09T19:51:45.710 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-09T19:51:45.710 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000177011 s, 2.9 MB/s 2026-03-09T19:51:45.711 DEBUG:teuthology.orchestra.run.vm01:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T19:51:45.769 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vde 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vde 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-09 19:51:24.398123753 +0000 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-09 19:51:23.989130308 +0000 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-09 19:51:23.989130308 +0000 2026-03-09T19:51:45.826 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-09 19:46:06.336000000 +0000 2026-03-09T19:51:45.826 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T19:51:45.885 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 systemd[1]: Starting Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:51:45.889 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-09T19:51:45.889 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-09T19:51:45.889 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000121558 s, 4.2 MB/s 2026-03-09T19:51:45.890 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T19:51:45.950 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T19:51:45.950 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T19:51:45.984 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T19:51:45.984 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d? 
2026-03-09T19:51:46.100 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda 2026-03-09T19:51:46.100 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb 2026-03-09T19:51:46.100 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc 2026-03-09T19:51:46.100 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd 2026-03-09T19:51:46.100 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde 2026-03-09T19:51:46.100 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T19:51:46.100 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T19:51:46.100 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb 2026-03-09T19:51:46.202 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 19:51:30.441034642 +0000 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 19:51:30.110034350 +0000 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 19:51:30.110034350 +0000 2026-03-09T19:51:46.203 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 19:45:34.276000000 +0000 2026-03-09T19:51:46.203 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 podman[49200]: 2026-03-09 19:51:45.885172865 +0000 UTC m=+0.021829829 container create de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, architecture=x86_64, io.openshift.expose-services=, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, release=754, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, name=centos-stream, RELEASE=HEAD, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, distribution-scope=public, version=8) 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 podman[49200]: 2026-03-09 19:51:45.923089762 +0000 UTC m=+0.059746725 container init de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, release=754, version=8, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, GIT_BRANCH=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream) 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 podman[49200]: 2026-03-09 19:51:45.925781279 +0000 UTC m=+0.062438242 container start de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, release=754, vcs-type=git, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, name=centos-stream, io.openshift.expose-services=, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, io.buildah.version=1.19.8, RELEASE=HEAD, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 bash[49200]: de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 podman[49200]: 2026-03-09 19:51:45.875429699 +0000 UTC m=+0.012086662 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-09T19:51:46.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:45 vm06 systemd[1]: Started Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:51:46.255 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:46.030+0000 7f716a7f3000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:51:46.255 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:46.084+0000 7f716a7f3000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:51:46.263 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T19:51:46.263 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T19:51:46.263 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000217788 s, 2.4 MB/s 2026-03-09T19:51:46.264 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T19:51:46.298 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 19:51:30.516034710 +0000 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 19:51:30.114034354 +0000 2026-03-09T19:51:46.388 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 19:51:30.114034354 +0000 2026-03-09T19:51:46.389 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 19:45:34.285000000 +0000 2026-03-09T19:51:46.389 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T19:51:46.433 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T19:51:46.433 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T19:51:46.433 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000147997 s, 3.5 MB/s 2026-03-09T19:51:46.435 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T19:51:46.475 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd 2026-03-09T19:51:46.538 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:46.534+0000 7f716a7f3000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 19:51:30.594034782 +0000 2026-03-09T19:51:46.564 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 19:51:30.113034353 +0000 2026-03-09T19:51:46.565 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 19:51:30.113034353 +0000 2026-03-09T19:51:46.565 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 19:45:34.288000000 +0000 2026-03-09T19:51:46.565 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T19:51:46.629 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T19:51:46.629 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T19:51:46.629 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000198722 s, 2.6 MB/s 2026-03-09T19:51:46.630 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T19:51:46.689 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde 2026-03-09T19:51:46.748 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde 2026-03-09T19:51:46.748 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T19:51:46.748 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 19:51:30.658034841 +0000 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 19:51:30.120034358 +0000 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 19:51:30.120034358 +0000 2026-03-09T19:51:46.749 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 19:45:34.292000000 +0000 2026-03-09T19:51:46.749 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T19:51:46.818 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T19:51:46.818 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T19:51:46.819 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000192981 s, 2.7 MB/s 2026-03-09T19:51:46.820 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T19:51:46.882 INFO:tasks.cephadm:Deploying osd.0 on vm01 with /dev/vde... 
2026-03-09T19:51:46.882 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vde
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.070 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:46.921+0000 7f716a7f3000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:47.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:47.341 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:47 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:47.070+0000 7f716a7f3000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-09T19:51:47.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:47 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:47.127+0000 7f716a7f3000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-09T19:51:47.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:47 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:47.306+0000 7f716a7f3000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-09T19:51:47.498 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T19:51:47.512 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm01:/dev/vde
2026-03-09T19:51:48.227 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[53908]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:48.227 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[53908]: Reconfiguring mgr.y (unknown last config time)...
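[annotation] Every mon in the quorum (mon.a, mon.b, mon.c) writes its own copy of each audited command, which is why the identical mgr.y config dump / auth get sequence appears three times above, once per ceph-mon journal. When reading a capture like this it can help to collapse that triplication to a single mon's view; a small filter of our own (not part of teuthology), assuming the run was saved as teuthology.log:

    # Keep one mon's audit trail and tally the dispatched commands
    grep 'journalctl@ceph.mon.a' teuthology.log \
        | grep -o 'cmd=\[[^]]*\]: dispatch' \
        | sort | uniq -c | sort -rn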
2026-03-09T19:51:48.227 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[53908]: Reconfiguring daemon mgr.y on vm01
2026-03-09T19:51:48.228 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[49698]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:48.228 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[49698]: Reconfiguring mgr.y (unknown last config time)...
2026-03-09T19:51:48.228 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:47 vm01 ceph-mon[49698]: Reconfiguring daemon mgr.y on vm01
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:47 vm06 ceph-mon[48187]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:47 vm06 ceph-mon[48187]: Reconfiguring mgr.y (unknown last config time)...
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:47 vm06 ceph-mon[48187]: Reconfiguring daemon mgr.y on vm01
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:47 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:47.945+0000 7f716a7f3000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.134+0000 7f716a7f3000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-09T19:51:48.248 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.192+0000 7f716a7f3000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-09T19:51:48.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.248+0000 7f716a7f3000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-09T19:51:48.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.316+0000 7f716a7f3000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-09T19:51:48.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.370+0000 7f716a7f3000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-09T19:51:48.973 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.672+0000 7f716a7f3000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-09T19:51:48.973 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:48 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:48.748+0000 7f716a7f3000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2610213220' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]': finished
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: osdmap e5: 1 total, 0 up, 1 in
2026-03-09T19:51:49.316 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2610213220' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]': finished
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: osdmap e5: 1 total, 0 up, 1 in
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2610213220' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]: dispatch
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f103ea3b-bf46-454a-98e8-0d7aaed22607"}]': finished
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: osdmap e5: 1 total, 0 up, 1 in
2026-03-09T19:51:49.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:49.567 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.316+0000 7f716a7f3000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-09T19:51:49.567 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.378+0000 7f716a7f3000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-09T19:51:49.567 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.441+0000 7f716a7f3000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-09T19:51:49.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.567+0000 7f716a7f3000 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-09T19:51:49.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.629+0000 7f716a7f3000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-09T19:51:49.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.723+0000 7f716a7f3000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-09T19:51:49.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:49 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:49.815+0000 7f716a7f3000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-09T19:51:50.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:49 vm06 ceph-mon[48187]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:50.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:49 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/675807320' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:50.341 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:50 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:50.143+0000 7f716a7f3000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-09T19:51:50.341 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:51:50 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:51:50.210+0000 7f716a7f3000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-09T19:51:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:49 vm01 ceph-mon[53908]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:49 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/675807320' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:49 vm01 ceph-mon[49698]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:49 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/675807320' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:51.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:50 vm06 ceph-mon[48187]: Standby manager daemon x started
2026-03-09T19:51:51.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T19:51:51.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T19:51:51.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T19:51:51.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[49698]: Standby manager daemon x started
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[53908]: Standby manager daemon x started
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T19:51:51.369 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/595899589' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T19:51:52.243 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[49698]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:52.243 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[49698]: mgrmap e14: y(active, since 30s), standbys: x
2026-03-09T19:51:52.243 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T19:51:52.245 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[53908]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:52.245 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[53908]: mgrmap e14: y(active, since 30s), standbys: x
2026-03-09T19:51:52.245 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:51 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T19:51:52.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:51 vm06 ceph-mon[48187]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:52.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:51 vm06 ceph-mon[48187]: mgrmap e14: y(active, since 30s), standbys: x
2026-03-09T19:51:52.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:51 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[49698]: Deploying daemon osd.0 on vm01
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:53.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:52 vm01 ceph-mon[53908]: Deploying daemon osd.0 on vm01
2026-03-09T19:51:53.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-09T19:51:53.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:53.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:52 vm06 ceph-mon[48187]: Deploying daemon osd.0 on vm01
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[49698]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[53908]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:53 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:54.333 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 0 on host 'vm01'
2026-03-09T19:51:54.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:53 vm06 ceph-mon[48187]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:54.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:53 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:54.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:53 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:54.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:53 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:54.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:53 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:54.392 DEBUG:teuthology.orchestra.run.vm01:osd.0> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.0.service
2026-03-09T19:51:54.393 INFO:tasks.cephadm:Deploying osd.1 on vm01 with /dev/vdd...
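[annotation] The per-disk pattern that just completed for osd.0, and repeats below for osd.1 and osd.2, is: cephadm ceph-volume lvm zap wipes the device, ceph orch daemon add osd hands it to the orchestrator, the run blocks until "Created osd(s) N on host 'vm01'" comes back, and a journalctl -f follower is attached to the new systemd unit. An equivalent manual loop, sketched under the assumption of this run's image, fsid, and host, and omitting the -c/-k config and keyring flags the harness passes explicitly:

    # Deploy one OSD per listed device, mirroring the zap -> add sequence in the log
    image=quay.io/ceph/ceph:v17.2.0
    fsid=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    for dev in /dev/vde /dev/vdd /dev/vdc; do
        sudo cephadm --image "$image" ceph-volume --fsid "$fsid" -- lvm zap "$dev"   # wipe old LVM/FS signatures
        sudo cephadm --image "$image" shell --fsid "$fsid" -- \
            ceph orch daemon add osd "vm01:$dev"                                     # prints "Created osd(s) N on host 'vm01'"
    done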
2026-03-09T19:51:54.393 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdd
2026-03-09T19:51:55.046 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:51:54 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T19:51:54.933+0000 7fc48ea663c0 -1 osd.0 0 log_to_monitors true
2026-03-09T19:51:55.107 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T19:51:55.120 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm01:/dev/vdd
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:55.315 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[49698]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:55.316 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:55 vm01 ceph-mon[53908]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:55.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:55 vm06 ceph-mon[48187]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-09T19:51:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-09T19:51:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: osdmap e6: 1 total, 0 up, 1 in
2026-03-09T19:51:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: Detected new or changed devices on vm01
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: osdmap e6: 1 total, 0 up, 1 in
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: Detected new or changed devices on vm01
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:51:56.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:56.630 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:51:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T19:51:56.352+0000 7fc485469700 -1 osd.0 0 waiting for initial osdmap
2026-03-09T19:51:56.630 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:51:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T19:51:56.362+0000 7fc481e04700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: osdmap e6: 1 total, 0 up, 1 in
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: Detected new or changed devices on vm01
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:51:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y'
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: osdmap e7: 1 total, 0 up, 1 in
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3569176881' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304] boot
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]': finished
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: osdmap e8: 2 total, 1 up, 2 in
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1043361437' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: osdmap e7: 1 total, 0 up, 1 in
2026-03-09T19:51:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3569176881' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304] boot
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]': finished
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: osdmap e8: 2 total, 1 up, 2 in
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:57.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:57 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1043361437' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: osdmap e7: 1 total, 0 up, 1 in
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3569176881' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: osd.0 [v2:192.168.123.101:6802/1503315304,v1:192.168.123.101:6803/1503315304] boot
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "52b44b6f-8ac7-4e25-969b-53f2808d1cb7"}]': finished
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: osdmap e8: 2 total, 1 up, 2 in
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:51:58.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:51:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T19:51:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:57 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1043361437' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-09T19:51:59.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:58 vm06 ceph-mon[48187]: purged_snaps scrub starts
2026-03-09T19:51:59.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:58 vm06 ceph-mon[48187]: purged_snaps scrub ok
2026-03-09T19:51:59.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:58 vm06 ceph-mon[48187]: osdmap e9: 2 total, 1 up, 2 in
2026-03-09T19:51:59.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:58 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[49698]: purged_snaps scrub starts
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[49698]: purged_snaps scrub ok
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[49698]: osdmap e9: 2 total, 1 up, 2 in
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[53908]: purged_snaps scrub starts
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[53908]: purged_snaps scrub ok
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[53908]: osdmap e9: 2 total, 1 up, 2 in
2026-03-09T19:51:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:58 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:52:00.009 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:51:59 vm01 ceph-mon[49698]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:00.010 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:51:59 vm01 ceph-mon[53908]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:00.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:51:59 vm06 ceph-mon[48187]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:00.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:00 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-09T19:52:00.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:00 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:52:00.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:00 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-09T19:52:00.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:00 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:52:01.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:00 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-09T19:52:01.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:00 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:52:01.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:01 vm01 ceph-mon[49698]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:01.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:01 vm01 ceph-mon[49698]: Deploying daemon osd.1 on vm01
2026-03-09T19:52:01.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:01 vm01 ceph-mon[53908]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:01.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:01 vm01 ceph-mon[53908]: Deploying daemon osd.1 on vm01
2026-03-09T19:52:02.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:01 vm06 ceph-mon[48187]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-09T19:52:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:01 vm06 ceph-mon[48187]: Deploying daemon osd.1 on vm01
2026-03-09T19:52:03.081 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 1 on host 'vm01'
2026-03-09T19:52:03.153 DEBUG:teuthology.orchestra.run.vm01:osd.1> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.1.service
2026-03-09T19:52:03.154 INFO:tasks.cephadm:Deploying osd.2 on vm01 with /dev/vdc...
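[annotation] Each new OSD registers itself on boot, as the osd.0 sequence above shows: it issues osd crush set-device-class to tag itself hdd, then osd crush create-or-move with weight 0.0195, which is the 20 GiB VM disk expressed in CRUSH's TiB units (20/1024 is roughly 0.0195); the osdmap epoch then advances and the mon records the daemon's boot. A quick way to confirm the resulting tree and classes, assuming only that an admin keyring is reachable on the host:

    # Verify the CRUSH placement the boot sequence above produced
    fsid=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    sudo cephadm shell --fsid "$fsid" -- ceph osd tree            # expect weight ~0.01950, class hdd
    sudo cephadm shell --fsid "$fsid" -- ceph osd crush class ls  # expect ["hdd"]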
2026-03-09T19:52:03.154 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdc 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.228 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 
192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.477 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:03 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:03 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:03.880 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T19:52:03.586+0000 7f63c2ca53c0 -1 osd.1 0 log_to_monitors true 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[49698]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[49698]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[49698]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": 
"hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[53908]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[53908]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.327 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:04 vm01 ceph-mon[53908]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:04 vm06 ceph-mon[48187]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:04.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:04 vm06 ceph-mon[48187]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:04 vm06 ceph-mon[48187]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T19:52:04.754 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:04.771 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm01:/dev/vdc 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: Detected new or changed devices on vm01 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: from='mgr.14152 
192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[49698]: pgmap v22: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:05.531 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: Detected new or changed devices on vm01 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:05 vm01 ceph-mon[53908]: pgmap v22: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:05.532 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T19:52:05.250+0000 7f63b96a8700 -1 osd.1 0 waiting for initial osdmap 2026-03-09T19:52:05.532 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T19:52:05.256+0000 7f63b6043700 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:05.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T19:52:05.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T19:52:05.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: 
dispatch 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: Detected new or changed devices on vm01 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:05 vm06 ceph-mon[48187]: pgmap v22: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T19:52:06.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: osdmap e11: 2 total, 1 up, 2 in 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='client.14253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/603245793' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126] boot 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]': finished 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:06 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: osdmap e11: 2 total, 1 up, 2 in 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='client.14253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/603245793' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126] boot 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]': finished 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T19:52:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: osdmap e11: 2 total, 1 up, 2 in 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='client.14253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/603245793' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: osd.1 [v2:192.168.123.101:6810/705098126,v1:192.168.123.101:6811/705098126] boot 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "98d1fc31-9ae8-4c8e-bed3-b4c00f6da774"}]': finished 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:52:06.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:06 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:07.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:07 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:07.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:07 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:07.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:07 vm06 ceph-mon[48187]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:07.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:07 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1735078241' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[49698]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1735078241' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[53908]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:07.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:07 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1735078241' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:08.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:08 vm06 ceph-mon[48187]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T19:52:08.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:08 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:08.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:08 vm01 ceph-mon[49698]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T19:52:08.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:08 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:08.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:08 vm01 ceph-mon[53908]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T19:52:08.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:08 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:09.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:09 vm01 ceph-mon[49698]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:09.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:09 vm01 ceph-mon[53908]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:09.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:09 vm06 ceph-mon[48187]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:10.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T19:52:10.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:10.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[49698]: Deploying daemon osd.2 on vm01 2026-03-09T19:52:10.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T19:52:10.354 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:10.354 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:10 vm01 ceph-mon[53908]: Deploying daemon osd.2 on vm01 2026-03-09T19:52:10.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:10 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T19:52:10.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:10 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:10.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:10 vm06 ceph-mon[48187]: Deploying daemon osd.2 on vm01 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[49698]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:11.514 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[53908]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:11.514 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:11 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:11.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:11 vm06 ceph-mon[48187]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:11.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:11 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:11.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:11 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:11.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:11 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:11.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:11 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:12.098 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 2 on host 'vm01' 2026-03-09T19:52:12.145 DEBUG:teuthology.orchestra.run.vm01:osd.2> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service 2026-03-09T19:52:12.148 INFO:tasks.cephadm:Deploying osd.3 on vm01 with /dev/vdb... 
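The DEBUG:teuthology.orchestra.run.vm01 lines around this point show the per-device sequence the tasks.cephadm step drives for each OSD it deploys: zap the device with ceph-volume, then hand it to the orchestrator with "ceph orch daemon add osd", and wait for the matching "Created osd(s) N on host" line. Below is a minimal sketch of that sequence, assuming only the image, fsid, host, and device paths visible in this log; the shell loop and variable names are illustrative and not part of the original task.

    # Sketch only: reconstructed from the DEBUG command lines in this log.
    IMAGE=quay.io/ceph/ceph:v17.2.0
    FSID=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    CEPHADM=/home/ubuntu/cephtest/cephadm

    for dev in /dev/vdc /dev/vdb; do
        # wipe any previous LVM/partition state on the device
        sudo "$CEPHADM" --image "$IMAGE" ceph-volume \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
            --fsid "$FSID" -- lvm zap "$dev"
        # ask the orchestrator to create an OSD on that device
        sudo "$CEPHADM" --image "$IMAGE" shell \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
            --fsid "$FSID" -- ceph orch daemon add osd "vm01:$dev"
    done

Each iteration corresponds in the log to an "orch daemon add osd" dispatch on the mons, an "osd new" from client.bootstrap-osd, and finally the "Created osd(s) N on host 'vm01'" confirmation captured by teuthology.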
2026-03-09T19:52:12.148 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdb 2026-03-09T19:52:12.798 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:12.813 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm01:/dev/vdb 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:13.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:13 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[49698]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[49698]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:13 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:13.353 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:13 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:13 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:13.353 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:12 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:14.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: osdmap e14: 3 total, 2 up, 3 in 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 
cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:14 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: osdmap e14: 3 total, 2 up, 3 in 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 
2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:14.380 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 19:52:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[62424]: 2026-03-09T19:52:14.018+0000 7efe08d0c700 -1 osd.2 0 waiting for initial osdmap 2026-03-09T19:52:14.380 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 19:52:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[62424]: 2026-03-09T19:52:14.026+0000 7efe02ea2700 -1 osd.2 15 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: osdmap e14: 3 total, 2 up, 3 in 2026-03-09T19:52:14.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:14.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:14 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:15.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='client.24172 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:15.341 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: Detected new or changed devices on vm01 2026-03-09T19:52:15.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:15.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: osdmap e15: 3 total, 2 up, 3 in 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2250187905' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273] boot 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]': finished 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: osdmap e16: 4 total, 3 up, 4 in 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T19:52:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:15 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/3129361673' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='client.24172 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: Detected new or changed devices on vm01 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: osdmap e15: 3 total, 2 up, 3 in 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2250187905' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273] boot 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]': finished 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: osdmap e16: 4 total, 3 up, 4 in 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/3129361673' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='client.24172 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: Detected new or changed devices on vm01 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: osdmap e15: 3 total, 2 up, 3 in 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2250187905' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: osd.2 [v2:192.168.123.101:6818/3336480273,v1:192.168.123.101:6819/3336480273] boot 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "76c92c68-25ba-439b-bf8a-3eedaeec05b5"}]': finished 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: osdmap e16: 4 total, 3 up, 4 in 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T19:52:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:15 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3129361673' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:16.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:16.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-09T19:52:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T19:52:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:16 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-09T19:52:16.379 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:16.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:16 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[49698]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[49698]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[53908]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:17.589 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:17 vm01 ceph-mon[53908]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:17.589 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64829]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T19:52:17.589 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64829]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:17.589 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64829]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:17.589 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64840]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T19:52:17.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:17 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T19:52:17.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:17 vm06 ceph-mon[48187]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T19:52:17.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:17 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:17.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:17 vm06 ceph-mon[48187]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:17.882 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64881]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T19:52:17.882 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64881]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:17.882 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64881]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:17.882 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64840]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:17.882 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64840]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64929]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64929]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:17 vm01 sudo[64929]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 sudo[64995]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 sudo[64995]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:18.225 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 sudo[64995]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: osdmap e19: 4 total, 3 up, 4 in 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: Deploying daemon osd.3 on vm01 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.542 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: osdmap e19: 4 total, 3 up, 4 in 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: Deploying daemon osd.3 on vm01 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 
2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.543 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:18 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 sudo[49760]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 sudo[49760]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 sudo[49760]: pam_unix(sudo:session): session closed for user root 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: osdmap e19: 4 total, 3 up, 4 in 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: Deploying daemon osd.3 on vm01 2026-03-09T19:52:18.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:52:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:18 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: pgmap v38: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 
2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: pgmap v38: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:19.401 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:19 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:19.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: pgmap v38: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:19 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:20.262 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 3 on host 'vm01' 2026-03-09T19:52:20.329 DEBUG:teuthology.orchestra.run.vm01:osd.3> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.3.service 2026-03-09T19:52:20.330 INFO:tasks.cephadm:Deploying osd.4 on vm06 with /dev/vde... 
2026-03-09T19:52:20.330 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vde 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-09T19:52:20.533 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.534 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.534 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.534 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:20.534 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:20.534 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:20 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:20.610 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:20.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:20 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:20.978 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:52:20.992 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm06:/dev/vde 2026-03-09T19:52:21.380 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 19:52:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T19:52:20.930+0000 7f9566c903c0 -1 osd.3 0 log_to_monitors true 2026-03-09T19:52:21.679 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:21 vm06 ceph-mon[48187]: pgmap v39: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:21.679 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:21 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:52:21.679 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:21 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:52:21.679 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:21 vm06 ceph-mon[48187]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T19:52:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[49698]: pgmap v39: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:52:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:52:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[49698]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T19:52:21.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[53908]: pgmap v39: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:21.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:52:21.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:52:21.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:21 vm01 ceph-mon[53908]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T19:52:22.629 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 19:52:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T19:52:22.241+0000 7f955ee96700 -1 osd.3 0 waiting for initial osdmap 2026-03-09T19:52:22.629 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 19:52:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T19:52:22.248+0000 7f955a02e700 -1 osd.3 21 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: osdmap e20: 4 total, 3 up, 4 in 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='client.14292 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: Detected new or changed devices on vm01 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": 
"config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: osdmap e21: 5 total, 3 up, 5 in 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: osdmap e20: 4 total, 3 up, 4 in 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='client.14292 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 
2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: Detected new or changed devices on vm01 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]': finished 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: osdmap e21: 5 total, 3 up, 5 in 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:22 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: osdmap e20: 4 total, 3 up, 4 in 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T19:52:22.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='client.14292 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: Detected new or changed devices on vm01 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='client.? 
192.168.123.106:0/4079446984' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6b56be69-0ed2-425d-90bc-bb41ca45e362"}]': finished 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: osdmap e21: 5 total, 3 up, 5 in 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:22 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:23.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:23.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/4196245470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769] boot 2026-03-09T19:52:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T19:52:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:23 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/4196245470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769] boot 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: from='client.? 
192.168.123.106:0/4196245470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: osd.3 [v2:192.168.123.101:6826/3485527769,v1:192.168.123.101:6827/3485527769] boot 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:52:23.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:23 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:24.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:24 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:24.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:24 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:24.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:24 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:24.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:24 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:24.904 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:24 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:24.904 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:24 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:25.768 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:25 vm06 ceph-mon[48187]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:25.768 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:25 vm06 ceph-mon[48187]: osdmap e23: 5 total, 4 up, 5 in 2026-03-09T19:52:25.768 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:25 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:25.768 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:25 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T19:52:25.768 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:25 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[53908]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[53908]: osdmap e23: 5 total, 4 up, 5 in 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[49698]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[49698]: osdmap e23: 5 total, 4 up, 5 in 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T19:52:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:25 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:26 vm06 ceph-mon[48187]: Deploying daemon osd.4 on vm06 2026-03-09T19:52:26.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:26 vm01 ceph-mon[53908]: Deploying daemon osd.4 on vm06 2026-03-09T19:52:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:26 vm01 ceph-mon[49698]: Deploying daemon osd.4 on vm06 2026-03-09T19:52:27.904 INFO:teuthology.orchestra.run.vm06.stdout:Created osd(s) 4 on host 'vm06' 2026-03-09T19:52:27.979 DEBUG:teuthology.orchestra.run.vm06:osd.4> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.4.service 2026-03-09T19:52:27.980 INFO:tasks.cephadm:Deploying osd.5 on vm06 with /dev/vdd... 2026-03-09T19:52:27.980 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdd 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 
ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:28.202 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:28 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:28.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:28 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:29.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:29 vm06 ceph-mon[48187]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:29 vm06 ceph-mon[48187]: from='osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:29 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:29 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:29.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:29 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.179 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 19:52:28 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T19:52:28.917+0000 7f77a9df03c0 -1 osd.4 0 log_to_monitors true 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[49698]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[49698]: from='osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": 
"osd_memory_target"}]: dispatch 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[53908]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[53908]: from='osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:29.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:29 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:29.480 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:52:29.495 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm06:/dev/vdd 2026-03-09T19:52:30.239 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 19:52:30 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T19:52:30.099+0000 7f77a07f3700 -1 osd.4 0 waiting for initial osdmap 2026-03-09T19:52:30.239 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 19:52:30 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T19:52:30.109+0000 7f779b18a700 -1 osd.4 25 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: Detected new or changed devices on vm06 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: Adjusting osd_memory_target on vm06 to 257.0M 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: Unable to set osd_memory_target on vm06 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: osdmap e24: 5 total, 4 up, 5 in 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='osd.4 
[v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:30.239 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:30 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: Detected new or changed devices on vm06 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: Adjusting osd_memory_target on vm06 to 257.0M 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: Unable to set osd_memory_target on vm06 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: osdmap e24: 5 total, 4 up, 5 in 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:30 vm01 
ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: Detected new or changed devices on vm06 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: Adjusting osd_memory_target on vm06 to 257.0M 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: Unable to set osd_memory_target on vm06 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: osdmap e24: 5 total, 4 up, 5 in 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:30.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:30.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:30.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:30.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:30 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: osdmap e25: 5 total, 4 up, 5 in 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.379 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/3027133530' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120] boot 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]': finished 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: osdmap e25: 5 total, 4 up, 5 in 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/3027133530' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120] boot 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]': finished 2026-03-09T19:52:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T19:52:31.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:31 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: osdmap e25: 5 total, 4 up, 5 in 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/3027133530' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: osd.4 [v2:192.168.123.106:6800/1291324120,v1:192.168.123.106:6801/1291324120] boot 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ac39182-ee32-432b-bcf1-842662adc4ce"}]': finished 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:52:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:31 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/3539490501' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/3539490501' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T19:52:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:32 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:32.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:32.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:32.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-09T19:52:32.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: from='client.? 
192.168.123.106:0/3539490501' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:32.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T19:52:32.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:32 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:33.714 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:33 vm06 ceph-mon[48187]: pgmap v53: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:34.081 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:33 vm06 ceph-mon[48187]: osdmap e28: 6 total, 5 up, 6 in 2026-03-09T19:52:34.081 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:33 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[49698]: pgmap v53: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[49698]: osdmap e28: 6 total, 5 up, 6 in 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[53908]: pgmap v53: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[53908]: osdmap e28: 6 total, 5 up, 6 in 2026-03-09T19:52:34.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:33 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:34.969 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:34 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T19:52:34.969 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:34 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:34.969 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:34 vm06 ceph-mon[48187]: Deploying daemon osd.5 on vm06 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[49698]: Deploying daemon osd.5 on vm06 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[53908]: from='mgr.14152 
192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:35.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:34 vm01 ceph-mon[53908]: Deploying daemon osd.5 on vm06 2026-03-09T19:52:35.805 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:35 vm06 ceph-mon[48187]: pgmap v55: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:36.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:35 vm01 ceph-mon[49698]: pgmap v55: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:36.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:35 vm01 ceph-mon[53908]: pgmap v55: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T19:52:36.651 INFO:teuthology.orchestra.run.vm06.stdout:Created osd(s) 5 on host 'vm06' 2026-03-09T19:52:36.711 DEBUG:teuthology.orchestra.run.vm06:osd.5> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5.service 2026-03-09T19:52:36.712 INFO:tasks.cephadm:Deploying osd.6 on vm06 with /dev/vdc... 2026-03-09T19:52:36.712 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdc 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:36.939 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:36 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' 
entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:36 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:37.423 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:52:37.438 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm06:/dev/vdc 2026-03-09T19:52:37.896 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:37 vm06 ceph-mon[48187]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:38.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:37 vm01 ceph-mon[49698]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:38.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:37 vm01 ceph-mon[53908]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:38.286 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 19:52:37 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T19:52:37.895+0000 7fe1d03323c0 -1 osd.5 0 log_to_monitors true 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='client.24235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: 
dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:38.866 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:38 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:38.866 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 19:52:38 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T19:52:38.854+0000 7fe1c8538700 -1 osd.5 0 waiting for initial osdmap 2026-03-09T19:52:38.866 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 19:52:38 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T19:52:38.865+0000 7fe1c36d0700 -1 osd.5 30 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='client.24235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 
cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:39.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='client.24235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 
192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:39.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:38 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: Detected new or changed devices on vm06 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: Adjusting osd_memory_target on vm06 to 128.5M 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: Unable to set osd_memory_target on vm06 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 50 KiB/s, 0 objects/s recovering 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: osdmap e29: 6 total, 5 up, 6 in 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, 
"weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/131812851' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]': finished 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: osdmap e30: 7 total, 5 up, 7 in 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:39 vm06 ceph-mon[48187]: from='client.? 
192.168.123.106:0/3875498074' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: Detected new or changed devices on vm06 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: Adjusting osd_memory_target on vm06 to 128.5M 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: Unable to set osd_memory_target on vm06 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 50 KiB/s, 0 objects/s recovering 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: osdmap e29: 6 total, 5 up, 6 in 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/131812851' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]': finished 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: osdmap e30: 7 total, 5 up, 7 in 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/3875498074' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: Detected new or changed devices on vm06 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: Adjusting osd_memory_target on vm06 to 128.5M 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: Unable to set osd_memory_target on vm06 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 50 KiB/s, 0 objects/s recovering 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: osdmap e29: 6 total, 5 up, 6 in 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/131812851' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "021b1d92-8c4d-47f3-aa12-6b46528c6c64"}]': finished 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: osdmap e30: 7 total, 5 up, 7 in 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:39 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/3875498074' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[49698]: osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724] boot 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[49698]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[53908]: osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724] boot 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[53908]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:41.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:40 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:41.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:40 vm06 ceph-mon[48187]: osd.5 [v2:192.168.123.106:6808/3281344724,v1:192.168.123.106:6809/3281344724] boot 2026-03-09T19:52:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:40 vm06 ceph-mon[48187]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T19:52:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:40 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' 
entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:52:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:40 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:42.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:41 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:42.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:41 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:42.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:41 vm06 ceph-mon[48187]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:42.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:41 vm06 ceph-mon[48187]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T19:52:42.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:41 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[49698]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[49698]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[53908]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[53908]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T19:52:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:41 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:43.088 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:42 vm06 ceph-mon[48187]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T19:52:43.088 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:42 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:43.088 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:42 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T19:52:43.088 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:42 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:43.129 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[49698]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[53908]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T19:52:43.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:42 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: Deploying daemon osd.6 on vm06 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:43.908 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:43 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: Deploying daemon osd.6 on vm06 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: Deploying daemon osd.6 on vm06 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:43 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:44.690 INFO:teuthology.orchestra.run.vm06.stdout:Created osd(s) 6 on host 'vm06' 2026-03-09T19:52:44.736 DEBUG:teuthology.orchestra.run.vm06:osd.6> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.6.service 2026-03-09T19:52:44.738 INFO:tasks.cephadm:Deploying osd.7 on vm06 with /dev/vdb... 
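[editor's sketch] The entries that follow record the per-device add flow: cephadm runs ceph-volume lvm zap on the device, asks the orchestrator to add it as an OSD, and the harness then polls `ceph osd stat -f json` until the expected number of OSDs are up (the "Waiting for 8 OSDs to come up..." step further down, where num_up_osds climbs from 7 to 8). The snippet below is a minimal illustrative sketch of that sequence, not the tasks.cephadm implementation; the cephadm wrapper path, container image, and fsid are simply the values visible in this run's log.

    # Illustrative sketch only -- not the teuthology tasks.cephadm code.
    # IMAGE/FSID/paths are taken from this log; adjust for another cluster.
    import json
    import subprocess
    import time

    IMAGE = "quay.io/ceph/ceph:v17.2.0"
    FSID = "0d36e446-1bf1-11f1-a427-9918c6d73ee3"
    CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE]
    SHELL = CEPHADM + ["shell", "-c", "/etc/ceph/ceph.conf",
                       "-k", "/etc/ceph/ceph.client.admin.keyring",
                       "--fsid", FSID, "--"]

    def add_osd(host: str, dev: str) -> None:
        # Wipe any previous LVM state on the device, then hand it to the orchestrator.
        subprocess.run(CEPHADM + ["ceph-volume", "-c", "/etc/ceph/ceph.conf",
                                  "-k", "/etc/ceph/ceph.client.admin.keyring",
                                  "--fsid", FSID, "--", "lvm", "zap", dev],
                       check=True)
        subprocess.run(SHELL + ["ceph", "orch", "daemon", "add", "osd",
                                f"{host}:{dev}"], check=True)

    def wait_for_osds(want: int, timeout: int = 300) -> None:
        # Poll `ceph osd stat -f json` until the expected number of OSDs report up.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.run(SHELL + ["ceph", "osd", "stat", "-f", "json"],
                                 check=True, capture_output=True, text=True).stdout
            stat = json.loads(out)
            if stat["num_osds"] >= want and stat["num_up_osds"] >= want:
                return
            time.sleep(1)
        raise TimeoutError("OSDs did not all come up in time")

    add_osd("vm06", "/dev/vdb")
    wait_for_osds(8)

The JSON keys checked here (num_osds, num_up_osds) are the ones visible in the osd stat output captured later in this log.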
2026-03-09T19:52:44.738 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- lvm zap /dev/vdb 2026-03-09T19:52:45.435 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T19:52:45.454 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch daemon add osd vm06:/dev/vdb 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:45.657 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:45 vm06 ceph-mon[48187]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:45.657 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 19:52:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T19:52:45.471+0000 7ff189b153c0 -1 osd.6 0 log_to_monitors true 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:45.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[49698]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:45.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:45 vm01 ceph-mon[53908]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T19:52:47.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", 
"id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='client.24254 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/3438194690' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]': finished 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: osdmap e35: 8 total, 6 up, 8 in 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:46 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.343 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 19:52:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T19:52:46.861+0000 7ff181d1b700 -1 osd.6 0 waiting for initial osdmap 2026-03-09T19:52:47.343 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 19:52:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T19:52:46.874+0000 7ff17ceb3700 -1 osd.6 35 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='client.24254 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:47.379 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/3438194690' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]': finished 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: osdmap e35: 8 total, 6 up, 8 in 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='client.24254 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 
cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/3438194690' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0e4d7562-913f-4157-b012-dd8565afc290"}]': finished 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: osdmap e35: 8 total, 6 up, 8 in 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:47.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:46 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:48.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: Detected new or changed devices on vm06 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: Adjusting osd_memory_target on vm06 to 87737k 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: Unable to set osd_memory_target on vm06 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: from='client.? 
192.168.123.106:0/2388294630' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602] boot 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:47 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: Detected new or changed devices on vm06 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: Adjusting osd_memory_target on vm06 to 87737k 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: Unable to set osd_memory_target on vm06 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/2388294630' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602] boot 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: Detected new or changed devices on vm06 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: Adjusting osd_memory_target on vm06 to 87737k 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: Unable to set osd_memory_target on vm06 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-09T19:52:48.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: from='client.? 
192.168.123.106:0/2388294630' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T19:52:48.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: osd.6 [v2:192.168.123.106:6816/380370602,v1:192.168.123.106:6817/380370602] boot 2026-03-09T19:52:48.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T19:52:48.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:52:48.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:47 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:48 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:48 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:48 vm06 ceph-mon[48187]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T19:52:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:48 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[49698]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[53908]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T19:52:49.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:48 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:50.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:49 vm06 ceph-mon[48187]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:50.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:49 vm06 ceph-mon[48187]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T19:52:50.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:49 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:49 vm01 ceph-mon[49698]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:49 vm01 ceph-mon[49698]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 
19:52:49 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:49 vm01 ceph-mon[53908]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:49 vm01 ceph-mon[53908]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T19:52:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:49 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:51.066 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:50 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T19:52:51.066 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:50 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:50 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T19:52:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:50 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:50 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T19:52:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:50 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:51.958 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:51 vm06 ceph-mon[48187]: Deploying daemon osd.7 on vm06 2026-03-09T19:52:51.958 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:51 vm06 ceph-mon[48187]: pgmap v73: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:51.958 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:51 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[49698]: Deploying daemon osd.7 on vm06 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[49698]: pgmap v73: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[53908]: Deploying daemon osd.7 on vm06 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[53908]: pgmap v73: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:51 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:52.807 INFO:teuthology.orchestra.run.vm06.stdout:Created osd(s) 7 on host 'vm06' 2026-03-09T19:52:52.879 
DEBUG:teuthology.orchestra.run.vm06:osd.7> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.7.service 2026-03-09T19:52:52.880 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-09T19:52:52.881 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd stat -f json 2026-03-09T19:52:53.153 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.154 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:52 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.165 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:53.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:52 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:53.371 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:53.417 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":38,"num_osds":8,"num_up_osds":7,"osd_up_since":1773085967,"num_in_osds":8,"osd_in_since":1773085966,"num_remapped_pgs":0} 2026-03-09T19:52:53.593 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 19:52:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T19:52:53.262+0000 7fdeb64583c0 -1 osd.7 0 log_to_monitors true 2026-03-09T19:52:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:53 vm06 ceph-mon[48187]: pgmap v74: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:53 vm06 ceph-mon[48187]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T19:52:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:53 vm06 ceph-mon[48187]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", 
"ids": ["7"]}]: dispatch 2026-03-09T19:52:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:53 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3341654412' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[49698]: pgmap v74: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[49698]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[49698]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3341654412' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[53908]: pgmap v74: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[53908]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[53908]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T19:52:54.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:53 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3341654412' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:54.418 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd stat -f json 2026-03-09T19:52:54.867 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:54.937 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":39,"num_osds":8,"num_up_osds":7,"osd_up_since":1773085967,"num_in_osds":8,"osd_in_since":1773085966,"num_remapped_pgs":0} 2026-03-09T19:52:54.943 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: Detected new or changed devices on vm06 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: Adjusting osd_memory_target on vm06 to 65803k 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: Unable to set 
osd_memory_target on vm06 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1262723457' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: osdmap e40: 8 total, 7 up, 8 in 2026-03-09T19:52:54.944 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: Detected new or changed devices on vm06 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: Adjusting osd_memory_target on vm06 to 65803k 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: Unable 
to set osd_memory_target on vm06 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1262723457' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: osdmap e40: 8 total, 7 up, 8 in 2026-03-09T19:52:54.949 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:54 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: Detected new or changed devices on vm06 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:52:55.342 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: Adjusting osd_memory_target on vm06 to 65803k 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: Unable to set osd_memory_target on vm06 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1262723457' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: osdmap e40: 8 total, 7 up, 8 in 2026-03-09T19:52:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:54 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:55.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 19:52:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T19:52:54.939+0000 7fdeace5b700 -1 osd.7 0 waiting for initial osdmap 2026-03-09T19:52:55.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 19:52:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T19:52:54.951+0000 7fdea97f6700 -1 osd.7 40 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T19:52:55.938 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd stat -f json 2026-03-09T19:52:56.212 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:55 vm01 ceph-mon[49698]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:56.212 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:55 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:56.212 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:55 vm01 ceph-mon[53908]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:56.212 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:55 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:56.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:55 vm06 ceph-mon[48187]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:56.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:55 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:56.419 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:56.481 
INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":41,"num_osds":8,"num_up_osds":8,"osd_up_since":1773085975,"num_in_osds":8,"osd_in_since":1773085966,"num_remapped_pgs":1} 2026-03-09T19:52:56.481 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd dump --format=json 2026-03-09T19:52:56.644 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:52:57.028 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:52:57.029 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":42,"fsid":"0d36e446-1bf1-11f1-a427-9918c6d73ee3","created":"2026-03-09T19:50:56.432144+0000","modified":"2026-03-09T19:52:56.935354+0000","last_up_change":"2026-03-09T19:52:55.932528+0000","last_in_change":"2026-03-09T19:52:46.848075+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T19:52:14.740538+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"f103ea3b-bf46-454a-98e8-0d7aaed22607","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6803","nonce":1503315304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6805","nonce":1503315304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":1503315304},{"type":"v1","addr":"192.168
.123.101:6809","nonce":1503315304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6807","nonce":1503315304}]},"public_addr":"192.168.123.101:6803/1503315304","cluster_addr":"192.168.123.101:6805/1503315304","heartbeat_back_addr":"192.168.123.101:6809/1503315304","heartbeat_front_addr":"192.168.123.101:6807/1503315304","state":["exists","up"]},{"osd":1,"uuid":"52b44b6f-8ac7-4e25-969b-53f2808d1cb7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6811","nonce":705098126}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6813","nonce":705098126}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6817","nonce":705098126}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6815","nonce":705098126}]},"public_addr":"192.168.123.101:6811/705098126","cluster_addr":"192.168.123.101:6813/705098126","heartbeat_back_addr":"192.168.123.101:6817/705098126","heartbeat_front_addr":"192.168.123.101:6815/705098126","state":["exists","up"]},{"osd":2,"uuid":"98d1fc31-9ae8-4c8e-bed3-b4c00f6da774","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6819","nonce":3336480273}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6821","nonce":3336480273}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6825","nonce":3336480273}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6823","nonce":3336480273}]},"public_addr":"192.168.123.101:6819/3336480273","cluster_addr":"192.168.123.101:6821/3336480273","heartbeat_back_addr":"192.168.123.101:6825/3336480273","heartbeat_front_addr":"192.168.123.101:6823/3336480273","state":["exists","up"]},{"osd":3,"uuid":"76c92c68-25ba-439b-bf8a-3eedaeec05b5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6827","nonce":3485527769}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6829","nonce":3485527769}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6833","nonce":3485527769}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6831","nonce":3485527769}]},"public_addr":"192.168.123.101:6827/3485527769","cluster_addr":"192.168.123.101:6829/3485527769","heartbeat_back_addr":"192.168.123.101:6833/3485527769","heartbeat_front_addr":
"192.168.123.101:6831/3485527769","state":["exists","up"]},{"osd":4,"uuid":"6b56be69-0ed2-425d-90bc-bb41ca45e362","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":26,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6801","nonce":1291324120}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6803","nonce":1291324120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6807","nonce":1291324120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6805","nonce":1291324120}]},"public_addr":"192.168.123.106:6801/1291324120","cluster_addr":"192.168.123.106:6803/1291324120","heartbeat_back_addr":"192.168.123.106:6807/1291324120","heartbeat_front_addr":"192.168.123.106:6805/1291324120","state":["exists","up"]},{"osd":5,"uuid":"2ac39182-ee32-432b-bcf1-842662adc4ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":31,"up_thru":32,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6809","nonce":3281344724}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6811","nonce":3281344724}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6815","nonce":3281344724}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6813","nonce":3281344724}]},"public_addr":"192.168.123.106:6809/3281344724","cluster_addr":"192.168.123.106:6811/3281344724","heartbeat_back_addr":"192.168.123.106:6815/3281344724","heartbeat_front_addr":"192.168.123.106:6813/3281344724","state":["exists","up"]},{"osd":6,"uuid":"021b1d92-8c4d-47f3-aa12-6b46528c6c64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":36,"up_thru":37,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6817","nonce":380370602}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6819","nonce":380370602}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6823","nonce":380370602}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6821","nonce":380370602}]},"public_addr":"192.168.123.106:6817/380370602","cluster_addr":"192.168.123.106:6819/380370602","heartbeat_back_addr":"192.168.123.106:6823/380370602","heartbeat_front_addr":"192.168.123.106:6821/380370602","state":["exists","up"]},{"osd":7,"uuid":"0e4d7562-913f-4157-b012-dd8565afc290","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":41,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:
6825","nonce":1606047001}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6827","nonce":1606047001}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6831","nonce":1606047001}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6829","nonce":1606047001}]},"public_addr":"192.168.123.106:6825/1606047001","cluster_addr":"192.168.123.106:6827/1606047001","heartbeat_back_addr":"192.168.123.106:6831/1606047001","heartbeat_front_addr":"192.168.123.106:6829/1606047001","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:51:55.960617+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:04.618087+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:13.092154+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:21.938220+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:29.963194+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:38.866503+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:46.424420+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:54.280558+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/884642165":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6801/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6800/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/2124962822":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/895788608":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/500595740":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/3427283308":"2026-03-10T19:51:10.837384+0000","192.168.123.101:6800/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:6801/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/2245030288":"2026-03-10T19:51:10.837384+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T19:52:57.099 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': 
'.mgr', 'create_time': '2026-03-09T19:52:14.740538+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '19', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-09T19:52:57.100 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd pool get .mgr pg_num 2026-03-09T19:52:57.251 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: purged_snaps scrub starts 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: purged_snaps scrub ok 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001] boot 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: osdmap e41: 8 total, 8 up, 8 in 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:57.276 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/1429504149' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: purged_snaps scrub starts 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: purged_snaps scrub ok 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001] boot 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: osdmap e41: 8 total, 8 up, 8 in 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:57.277 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:56 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1429504149' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: purged_snaps scrub starts 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: purged_snaps scrub ok 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: osd.7 [v2:192.168.123.106:6824/1606047001,v1:192.168.123.106:6825/1606047001] boot 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: osdmap e41: 8 total, 8 up, 8 in 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:52:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:56 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1429504149' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T19:52:57.575 INFO:teuthology.orchestra.run.vm01.stdout:pg_num: 1 2026-03-09T19:52:57.650 INFO:tasks.cephadm:Adding prometheus.a on vm06 2026-03-09T19:52:57.650 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply prometheus '1;vm06=a' 2026-03-09T19:52:58.153 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled prometheus update... 2026-03-09T19:52:58.186 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:57 vm06 ceph-mon[48187]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:58.186 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:57 vm06 ceph-mon[48187]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T19:52:58.186 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:57 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2489774008' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:52:58.186 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:57 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2690548468' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T19:52:58.186 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:57 vm06 ceph-mon[48187]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T19:52:58.209 DEBUG:teuthology.orchestra.run.vm06:prometheus.a> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service 2026-03-09T19:52:58.211 INFO:tasks.cephadm:Adding node-exporter.a on vm01 2026-03-09T19:52:58.211 INFO:tasks.cephadm:Adding node-exporter.b on vm06 2026-03-09T19:52:58.211 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply node-exporter '2;vm01=a;vm06=b' 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[49698]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[49698]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2489774008' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2690548468' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[49698]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[53908]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[53908]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2489774008' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2690548468' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T19:52:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:57 vm01 ceph-mon[53908]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T19:52:58.688 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled node-exporter update... 
2026-03-09T19:52:58.730 DEBUG:teuthology.orchestra.run.vm01:node-exporter.a> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service 2026-03-09T19:52:58.732 DEBUG:teuthology.orchestra.run.vm06:node-exporter.b> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service 2026-03-09T19:52:58.734 INFO:tasks.cephadm:Adding alertmanager.a on vm01 2026-03-09T19:52:58.734 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply alertmanager '1;vm01=a' 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='client.24293 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: Saving service prometheus spec with placement vm06=a;count:1 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T19:52:59.243 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:52:59 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:52:59 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ignoring --setuser ceph since I am not root 2026-03-09T19:52:59.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:52:59 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ignoring --setgroup ceph since I am not root 2026-03-09T19:52:59.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:52:59 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:52:59.345+0000 7f426ffb6000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:52:59.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:52:59 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:52:59.399+0000 7f426ffb6000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:52:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='client.24293 -' entity='client.admin' cmd=[{"prefix": "orch apply", 
"service_type": "prometheus", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: Saving service prometheus spec with placement vm06=a;count:1 2026-03-09T19:52:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='client.24293 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: Saving service prometheus spec with placement vm06=a;count:1 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:52:59 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:52:59 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setuser ceph since I am not root 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:52:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setgroup ceph since I am not root 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:52:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:52:59.341+0000 7f1ad0aa5000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:52:59.630 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:52:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:52:59.396+0000 7f1ad0aa5000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:53:00.092 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:52:59 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:52:59.786+0000 7f426ffb6000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:53:00.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:52:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:52:59.787+0000 7f1ad0aa5000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:53:00.524 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:00 vm06 ceph-mon[48187]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T19:53:00.524 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:00 vm06 ceph-mon[48187]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-09T19:53:00.524 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:00.158+0000 7f426ffb6000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:53:00.524 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:00.299+0000 7f426ffb6000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:53:00.524 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:00.352+0000 7f426ffb6000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:00 vm01 ceph-mon[49698]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:00 vm01 ceph-mon[49698]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:00.164+0000 7f1ad0aa5000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:00.316+0000 7f1ad0aa5000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:00.373+0000 7f1ad0aa5000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 
19:53:00 vm01 ceph-mon[53908]: from='mgr.14152 192.168.123.101:0/3736943183' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T19:53:00.557 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:00 vm01 ceph-mon[53908]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-09T19:53:00.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:00.525+0000 7f426ffb6000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:53:00.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:00.555+0000 7f1ad0aa5000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:53:01.412 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.160+0000 7f426ffb6000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:53:01.413 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.348+0000 7f426ffb6000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:53:01.472 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.214+0000 7f1ad0aa5000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:53:01.472 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.408+0000 7f1ad0aa5000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:53:01.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.412+0000 7f426ffb6000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:53:01.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.468+0000 7f426ffb6000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:53:01.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.531+0000 7f426ffb6000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:53:01.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.588+0000 7f426ffb6000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:53:01.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.470+0000 7f1ad0aa5000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:53:01.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.528+0000 7f1ad0aa5000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:53:01.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.592+0000 7f1ad0aa5000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:53:01.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.654+0000 7f1ad0aa5000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:53:02.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.907+0000 7f426ffb6000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:53:02.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:01.982+0000 7f426ffb6000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:53:02.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:01.977+0000 7f1ad0aa5000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:53:02.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:02.061+0000 7f1ad0aa5000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:53:02.885 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:02.600+0000 7f426ffb6000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:53:02.885 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:02.667+0000 7f426ffb6000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:53:02.885 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:02.743+0000 7f426ffb6000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:53:02.942 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:02.662+0000 7f1ad0aa5000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:53:02.942 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:02.730+0000 7f1ad0aa5000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:53:02.942 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:02.801+0000 7f1ad0aa5000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:53:03.168 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:02.885+0000 7f426ffb6000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:53:03.168 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:02.956+0000 7f426ffb6000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:53:03.168 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:03.073+0000 7f426ffb6000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:53:03.215 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:02.941+0000 7f1ad0aa5000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 
2026-03-09T19:53:03.215 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:03.010+0000 7f1ad0aa5000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:53:03.215 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:03.113+0000 7f1ad0aa5000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:53:03.510 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:03.168+0000 7f426ffb6000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:53:03.555 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:03.213+0000 7f1ad0aa5000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: Standby manager daemon x restarted 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: Standby manager daemon x started 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: Active manager daemon y restarted 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:03 vm06 ceph-mon[48187]: Activating manager daemon y 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:03.510+0000 7f426ffb6000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:03.577+0000 7f426ffb6000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: [09/Mar/2026:19:53:03] ENGINE Bus STARTING 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: CherryPy Checker: 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: The Application mounted at '' has an empty config. 
2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: [09/Mar/2026:19:53:03] ENGINE Serving on http://:::9283 2026-03-09T19:53:03.800 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: [09/Mar/2026:19:53:03] ENGINE Bus STARTED 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: Standby manager daemon x restarted 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: Standby manager daemon x started 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: Active manager daemon y restarted 2026-03-09T19:53:03.808 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[49698]: Activating manager daemon y 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:03.553+0000 7f1ad0aa5000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:03.621+0000 7f1ad0aa5000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:03] ENGINE Bus STARTING 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: Standby manager daemon x restarted 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: Standby manager daemon x started 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: from='mgr.? 
192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/2311656507' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: Active manager daemon y restarted 2026-03-09T19:53:03.809 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:03 vm01 ceph-mon[53908]: Activating manager daemon y 2026-03-09T19:53:04.130 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: CherryPy Checker: 2026-03-09T19:53:04.130 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: The Application mounted at '' has an empty config. 2026-03-09T19:53:04.130 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:53:04.130 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:03] ENGINE Serving on http://:::9283 2026-03-09T19:53:04.130 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:03] ENGINE Bus STARTED 2026-03-09T19:53:04.594 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:04] ENGINE Bus STARTING 2026-03-09T19:53:04.595 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:04] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T19:53:04.595 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:53:04] ENGINE Bus STARTED 2026-03-09T19:53:04.695 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled alertmanager update... 
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: osdmap e44: 8 total, 8 up, 8 in
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: mgrmap e17: y(active, starting, since 0.0153157s), standbys: x
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-09T19:53:04.707 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: Manager daemon y is now available
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:04.708 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:04.750 DEBUG:teuthology.orchestra.run.vm01:alertmanager.a> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service
2026-03-09T19:53:04.752 INFO:tasks.cephadm:Adding grafana.a on vm06
2026-03-09T19:53:04.752 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph orch apply grafana '1;vm06=a'
2026-03-09T19:53:05.308 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled grafana update...
2026-03-09T19:53:05.363 DEBUG:teuthology.orchestra.run.vm06:grafana.a> sudo journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service
2026-03-09T19:53:05.365 INFO:tasks.cephadm:Setting up client nodes...
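Both monitoring daemons are applied with the compact placement string seen above ('1;vm01=a', '1;vm06=a'): a count of 1 plus one explicit host pinned to daemon id a, which the mon records back as "placement vm06=a;count:1". As a hedged sketch (my reading of the shorthand, not an expansion taken from this run), the same request expressed as a service spec fed to `ceph orch apply -i`:

    import subprocess
    import textwrap

    # Assumed YAML equivalent of `ceph orch apply grafana '1;vm06=a'`;
    # the 'host=id' form pins daemon id 'a' to host vm06.
    SPEC = textwrap.dedent("""\
        service_type: grafana
        placement:
          count: 1
          hosts:
            - vm06=a
    """)

    subprocess.run(["ceph", "orch", "apply", "-i", "-"],
                   input=SPEC.encode(), check=True)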
2026-03-09T19:53:05.365 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: [09/Mar/2026:19:53:04] ENGINE Bus STARTING
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: [09/Mar/2026:19:53:04] ENGINE Serving on https://192.168.123.101:7150
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: [09/Mar/2026:19:53:04] ENGINE Bus STARTED
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='client.24305 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm01=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: Saving service alertmanager spec with placement vm01=a;count:1
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: mgrmap e18: y(active, since 1.0527s), standbys: x
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: Adjusting osd_memory_target on vm06 to 65803k
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: Unable to set osd_memory_target on vm06 to 67382681: error parsing value: Value '67382681' is below minimum 939524096
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:53:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:05 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
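The "Unable to set osd_memory_target" entry is the cephadm memory autotuner at work: it divides a fraction of host RAM (autotune_memory_target_ratio, default 0.7 per the module options dumped later in this log) among the daemons on the host, and on these small VPS nodes the per-OSD share comes out to 67,382,681 bytes, far below the option's hard floor of 939,524,096 bytes (896 MiB), so the mon rejects the value and cephadm removes the per-host override instead. The arithmetic as a quick check (the host's total RAM is not in the log, so only the logged figures are used):

    # Figures copied from the log lines above.
    attempted = 67_382_681             # what the autotuner computed per OSD
    floor = 939_524_096                # minimum accepted osd_memory_target

    print(f"{attempted / 1024:.0f}k")  # -> 65803k, matching 'Adjusting ... to 65803k'
    print(f"{attempted / 2**20:.1f} MiB vs {floor / 2**20:.0f} MiB floor")
    print("accepted?", attempted >= floor)   # False -> 'Unable to set ...'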
2026-03-09T19:53:05.980 INFO:teuthology.orchestra.run.vm01.stdout:[client.0]
2026-03-09T19:53:05.980 INFO:teuthology.orchestra.run.vm01.stdout: key = AQAhJa9pVnYAOhAAOBmPjSZ1HyVDEo9JFdXrfw==
2026-03-09T19:53:06.068 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-09T19:53:06.068 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-03-09T19:53:06.069 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-03-09T19:53:06.238 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:06 vm01 systemd[1]: Starting Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T19:53:06.246 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-09T19:53:06.629 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:06 vm01 bash[68203]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1...
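Client setup above is two steps per client: mint the key with wide-open caps via `ceph auth get-or-create`, then land it under /etc/ceph with `dd` and `chmod 0644`. A self-contained sketch of the same flow (must run as root; the function name is mine, not teuthology's):

    import pathlib
    import subprocess

    def install_client_keyring(client_id: str) -> None:
        # Caps copied verbatim from the teuthology command above.
        keyring = subprocess.check_output([
            "ceph", "auth", "get-or-create", f"client.{client_id}",
            "mon", "allow *", "osd", "allow *",
            "mds", "allow *", "mgr", "allow *",
        ])
        path = pathlib.Path(f"/etc/ceph/ceph.client.{client_id}.keyring")
        path.write_bytes(keyring)   # stands in for `sudo dd of=...`
        path.chmod(0o644)           # stands in for `sudo chmod 0644 ...`

    install_client_keyring("0")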
2026-03-09T19:53:06.817 INFO:teuthology.orchestra.run.vm06.stdout:[client.1]
2026-03-09T19:53:06.817 INFO:teuthology.orchestra.run.vm06.stdout: key = AQAiJa9p2yNkMBAAz2xBRokhhYbEMuZ9IgZZ8Q==
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: from='client.24334 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: Saving service grafana spec with placement vm06=a;count:1
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: Deploying daemon node-exporter.a on vm01
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2024400828' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2024400828' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-09T19:53:06.818 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:06 vm06 ceph-mon[48187]: mgrmap e19: y(active, since 2s), standbys: x
2026-03-09T19:53:06.893 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T19:53:06.894 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.1.keyring
2026-03-09T19:53:06.894 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring
2026-03-09T19:53:06.932 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
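"Waiting until ceph daemons up and pgs clean" is a polling loop. A rough equivalent, assuming `ceph status --format json` is reachable from the test node (the interval and timeout here are arbitrary choices, not teuthology's):

    import json
    import subprocess
    import time

    def wait_for_clean(timeout: float = 300.0, interval: float = 5.0) -> None:
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = json.loads(subprocess.check_output(
                ["ceph", "status", "--format", "json"]))
            states = status["pgmap"].get("pgs_by_state", [])
            total = sum(s["count"] for s in states)
            clean = sum(s["count"] for s in states
                        if s["state_name"] == "active+clean")
            if total and clean == total:
                return                  # e.g. '1 pgs: 1 active+clean' above
            time.sleep(interval)
        raise TimeoutError("PGs never settled to active+clean")

    wait_for_clean()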
2026-03-09T19:53:06.932 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-03-09T19:53:06.932 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph mgr dump --format=json
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: from='client.24334 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: Saving service grafana spec with placement vm06=a;count:1
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: Deploying daemon node-exporter.a on vm01
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2024400828' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2024400828' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[49698]: mgrmap e19: y(active, since 2s), standbys: x
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T19:53:06.958 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: from='client.24334 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm06=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: Saving service grafana spec with placement vm06=a;count:1
2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: Deploying daemon node-exporter.a on vm01
2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/2024400828' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2024400828' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T19:53:06.959 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:06 vm01 ceph-mon[53908]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-09T19:53:07.096 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:07.461 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:07.531 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":19,"active_gid":14400,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":1068105247},{"type":"v1","addr":"192.168.123.101:6801","nonce":1068105247}]},"active_addr":"192.168.123.101:6801/1068105247","active_change":"2026-03-09T19:53:03.624908+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":24313,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP 
port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph 
containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container 
image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per 
host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.101:8443/","prometheus":"http://192.168.123.101:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":44,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":3734821764}]},{"addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":4186335858}]},{"addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":2587305647}]},{"addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":3749176550}]}]}} 2026-03-09T19:53:07.532 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-09T19:53:07.532 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-09T19:53:07.532 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd dump --format=json 2026-03-09T19:53:07.737 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:07.770 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[49698]: from='client.? 192.168.123.106:0/1961217694' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1628736329' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[53908]: from='client.? 192.168.123.106:0/1961217694' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[53908]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T19:53:07.771 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:07 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1628736329' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:53:07.771 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:07 vm01 bash[68203]: Getting image source signatures 2026-03-09T19:53:07.771 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:07 vm01 bash[68203]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-09T19:53:07.771 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:07 vm01 bash[68203]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-09T19:53:07.771 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:07 vm01 bash[68203]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-09T19:53:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:07 vm06 ceph-mon[48187]: from='client.? 192.168.123.106:0/1961217694' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:07 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T19:53:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:07 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T19:53:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:07 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1628736329' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:53:08.150 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:08.150 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":44,"fsid":"0d36e446-1bf1-11f1-a427-9918c6d73ee3","created":"2026-03-09T19:50:56.432144+0000","modified":"2026-03-09T19:53:03.624095+0000","last_up_change":"2026-03-09T19:52:55.932528+0000","last_in_change":"2026-03-09T19:52:46.848075+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T19:52:14.740538+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"f103ea3b-bf46-454a-98e8-0d7aaed22607","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6803","nonce":1503315304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6805","nonce":1503315304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6809","nonce":1503315304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6807","nonce":1503315304}]},"public_addr":"192.168.123.101:6803/1503315304","cluster_addr":"192.168.123.101:6805/1503315304","heartbeat_back_addr":"192.168.123.101:6809/1503315304","heartbeat_front_addr":"192.168.123.101:6807/1503315304","state":["ex
ists","up"]},{"osd":1,"uuid":"52b44b6f-8ac7-4e25-969b-53f2808d1cb7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6811","nonce":705098126}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6813","nonce":705098126}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6817","nonce":705098126}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6815","nonce":705098126}]},"public_addr":"192.168.123.101:6811/705098126","cluster_addr":"192.168.123.101:6813/705098126","heartbeat_back_addr":"192.168.123.101:6817/705098126","heartbeat_front_addr":"192.168.123.101:6815/705098126","state":["exists","up"]},{"osd":2,"uuid":"98d1fc31-9ae8-4c8e-bed3-b4c00f6da774","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6819","nonce":3336480273}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6821","nonce":3336480273}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6825","nonce":3336480273}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6823","nonce":3336480273}]},"public_addr":"192.168.123.101:6819/3336480273","cluster_addr":"192.168.123.101:6821/3336480273","heartbeat_back_addr":"192.168.123.101:6825/3336480273","heartbeat_front_addr":"192.168.123.101:6823/3336480273","state":["exists","up"]},{"osd":3,"uuid":"76c92c68-25ba-439b-bf8a-3eedaeec05b5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6827","nonce":3485527769}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6829","nonce":3485527769}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6833","nonce":3485527769}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6831","nonce":3485527769}]},"public_addr":"192.168.123.101:6827/3485527769","cluster_addr":"192.168.123.101:6829/3485527769","heartbeat_back_addr":"192.168.123.101:6833/3485527769","heartbeat_front_addr":"192.168.123.101:6831/3485527769","state":["exists","up"]},{"osd":4,"uuid":"6b56be69-0ed2-425d-90bc-bb41ca45e362","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":26,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6801","nonce":1291324120}]},"cluster_addrs":{"a
ddrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6803","nonce":1291324120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6807","nonce":1291324120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6805","nonce":1291324120}]},"public_addr":"192.168.123.106:6801/1291324120","cluster_addr":"192.168.123.106:6803/1291324120","heartbeat_back_addr":"192.168.123.106:6807/1291324120","heartbeat_front_addr":"192.168.123.106:6805/1291324120","state":["exists","up"]},{"osd":5,"uuid":"2ac39182-ee32-432b-bcf1-842662adc4ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":31,"up_thru":32,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6809","nonce":3281344724}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6811","nonce":3281344724}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6815","nonce":3281344724}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6813","nonce":3281344724}]},"public_addr":"192.168.123.106:6809/3281344724","cluster_addr":"192.168.123.106:6811/3281344724","heartbeat_back_addr":"192.168.123.106:6815/3281344724","heartbeat_front_addr":"192.168.123.106:6813/3281344724","state":["exists","up"]},{"osd":6,"uuid":"021b1d92-8c4d-47f3-aa12-6b46528c6c64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":36,"up_thru":37,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6817","nonce":380370602}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6819","nonce":380370602}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6823","nonce":380370602}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6821","nonce":380370602}]},"public_addr":"192.168.123.106:6817/380370602","cluster_addr":"192.168.123.106:6819/380370602","heartbeat_back_addr":"192.168.123.106:6823/380370602","heartbeat_front_addr":"192.168.123.106:6821/380370602","state":["exists","up"]},{"osd":7,"uuid":"0e4d7562-913f-4157-b012-dd8565afc290","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":41,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6825","nonce":1606047001}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6827","nonce":1606047001}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6831","nonce":1606047001}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:68
28","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6829","nonce":1606047001}]},"public_addr":"192.168.123.106:6825/1606047001","cluster_addr":"192.168.123.106:6827/1606047001","heartbeat_back_addr":"192.168.123.106:6831/1606047001","heartbeat_front_addr":"192.168.123.106:6829/1606047001","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:51:55.960617+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:04.618087+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:13.092154+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:21.938220+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:29.963194+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:38.866503+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:46.424420+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:54.280558+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/188615391":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6801/1206756272":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/2915987933":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/960219321":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/884642165":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6801/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6800/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/2124962822":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/895788608":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/500595740":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/3427283308":"2026-03-10T19:51:10.837384+0000","192.168.123.101:6800/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/3556214780":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6800/1206756272":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6801/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/2245030288":"2026-03-10T19:51:10.837384+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T19:53:08.211 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
2026-03-09T19:53:08.211 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd dump --format=json 2026-03-09T19:53:08.391 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:08.698 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[49698]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:08.698 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/163774579' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 bash[68203]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 bash[68203]: Writing manifest to image destination 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 podman[68203]: 2026-03-09 19:53:08.475027921 +0000 UTC m=+2.137843123 container create 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 podman[68203]: 2026-03-09 19:53:08.515814052 +0000 UTC m=+2.178629242 container init 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 podman[68203]: 2026-03-09 19:53:08.51931332 +0000 UTC m=+2.182128521 container start 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 bash[68203]: 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 podman[68203]: 2026-03-09 19:53:08.464922058 +0000 UTC m=+2.127737259 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.527Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 systemd[1]: Started Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.527Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=arp 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=edac 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-09T19:53:08.698 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 
level=info collector=netstat 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=os 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=stat 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=time 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=uname 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.528Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.529Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-09T19:53:08.699 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[68565]: ts=2026-03-09T19:53:08.529Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-09T19:53:08.788 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:08.788 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":44,"fsid":"0d36e446-1bf1-11f1-a427-9918c6d73ee3","created":"2026-03-09T19:50:56.432144+0000","modified":"2026-03-09T19:53:03.624095+0000","last_up_change":"2026-03-09T19:52:55.932528+0000","last_in_change":"2026-03-09T19:52:46.848075+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T19:52:14.740538+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"f103ea3b-bf46-454a-98e8-0d7aaed22607","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6803","nonce":1503315304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6805","nonce":1503315304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6809","nonce":1503315304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":1503315304},{"type":"v1","addr":"192.168.123.101:6807","nonce":1503315304}]},"public_addr":"192.168.123.101:6803/1503315304","cluster_addr":"192.168.123.101:6805/1503315304","heartbeat_back_addr":"192.168.123.101:6809/1503315304","heartbeat_front_addr":"192.168.123.101:6807/1503315304","state":["exists","up"]},{"osd":1,"uuid":"52b44b6f-8ac7-4e25-969b-53f2808d1cb7","up":1,"in":1,"weight":1,"pri
mary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6811","nonce":705098126}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6813","nonce":705098126}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6817","nonce":705098126}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":705098126},{"type":"v1","addr":"192.168.123.101:6815","nonce":705098126}]},"public_addr":"192.168.123.101:6811/705098126","cluster_addr":"192.168.123.101:6813/705098126","heartbeat_back_addr":"192.168.123.101:6817/705098126","heartbeat_front_addr":"192.168.123.101:6815/705098126","state":["exists","up"]},{"osd":2,"uuid":"98d1fc31-9ae8-4c8e-bed3-b4c00f6da774","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6819","nonce":3336480273}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6821","nonce":3336480273}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6825","nonce":3336480273}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3336480273},{"type":"v1","addr":"192.168.123.101:6823","nonce":3336480273}]},"public_addr":"192.168.123.101:6819/3336480273","cluster_addr":"192.168.123.101:6821/3336480273","heartbeat_back_addr":"192.168.123.101:6825/3336480273","heartbeat_front_addr":"192.168.123.101:6823/3336480273","state":["exists","up"]},{"osd":3,"uuid":"76c92c68-25ba-439b-bf8a-3eedaeec05b5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6827","nonce":3485527769}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6829","nonce":3485527769}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6833","nonce":3485527769}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":3485527769},{"type":"v1","addr":"192.168.123.101:6831","nonce":3485527769}]},"public_addr":"192.168.123.101:6827/3485527769","cluster_addr":"192.168.123.101:6829/3485527769","heartbeat_back_addr":"192.168.123.101:6833/3485527769","heartbeat_front_addr":"192.168.123.101:6831/3485527769","state":["exists","up"]},{"osd":4,"uuid":"6b56be69-0ed2-425d-90bc-bb41ca45e362","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":26,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6801","nonce":1291324120}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1291324120},{"type":"v1","addr":"192.
168.123.106:6803","nonce":1291324120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6807","nonce":1291324120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1291324120},{"type":"v1","addr":"192.168.123.106:6805","nonce":1291324120}]},"public_addr":"192.168.123.106:6801/1291324120","cluster_addr":"192.168.123.106:6803/1291324120","heartbeat_back_addr":"192.168.123.106:6807/1291324120","heartbeat_front_addr":"192.168.123.106:6805/1291324120","state":["exists","up"]},{"osd":5,"uuid":"2ac39182-ee32-432b-bcf1-842662adc4ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":31,"up_thru":32,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6809","nonce":3281344724}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6811","nonce":3281344724}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6815","nonce":3281344724}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":3281344724},{"type":"v1","addr":"192.168.123.106:6813","nonce":3281344724}]},"public_addr":"192.168.123.106:6809/3281344724","cluster_addr":"192.168.123.106:6811/3281344724","heartbeat_back_addr":"192.168.123.106:6815/3281344724","heartbeat_front_addr":"192.168.123.106:6813/3281344724","state":["exists","up"]},{"osd":6,"uuid":"021b1d92-8c4d-47f3-aa12-6b46528c6c64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":36,"up_thru":37,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6817","nonce":380370602}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6819","nonce":380370602}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6823","nonce":380370602}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":380370602},{"type":"v1","addr":"192.168.123.106:6821","nonce":380370602}]},"public_addr":"192.168.123.106:6817/380370602","cluster_addr":"192.168.123.106:6819/380370602","heartbeat_back_addr":"192.168.123.106:6823/380370602","heartbeat_front_addr":"192.168.123.106:6821/380370602","state":["exists","up"]},{"osd":7,"uuid":"0e4d7562-913f-4157-b012-dd8565afc290","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":41,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6825","nonce":1606047001}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6827","nonce":1606047001}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6831","nonce":1606047001}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1606047001},{"type":"v1","addr":"192.168.123.106:6829","nonce":1606047001}]},"public_
addr":"192.168.123.106:6825/1606047001","cluster_addr":"192.168.123.106:6827/1606047001","heartbeat_back_addr":"192.168.123.106:6831/1606047001","heartbeat_front_addr":"192.168.123.106:6829/1606047001","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:51:55.960617+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:04.618087+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:13.092154+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:21.938220+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:29.963194+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:38.866503+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:46.424420+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T19:52:54.280558+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/188615391":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6801/1206756272":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/2915987933":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/960219321":"2026-03-10T19:53:03.624064+0000","192.168.123.101:0/884642165":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6801/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:6800/413365033":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/2124962822":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/895788608":"2026-03-10T19:51:20.399353+0000","192.168.123.101:0/500595740":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/3427283308":"2026-03-10T19:51:10.837384+0000","192.168.123.101:6800/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/3556214780":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6800/1206756272":"2026-03-10T19:53:03.624064+0000","192.168.123.101:6801/4101403434":"2026-03-10T19:51:10.837384+0000","192.168.123.101:0/2245030288":"2026-03-10T19:51:10.837384+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T19:53:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:08 vm06 ceph-mon[48187]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:08.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:08 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/163774579' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:08 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.0 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.1 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.2 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.3 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.4 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.5 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.6 flush_pg_stats 2026-03-09T19:53:08.863 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph tell osd.7 flush_pg_stats 2026-03-09T19:53:08.947 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:08.947 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[53908]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:08.947 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/163774579' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:08.947 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:08 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:09.117 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:09 vm06 systemd[1]: Starting Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:09.492 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.499 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.546 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.551 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.592 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:09 vm06 bash[62391]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-09T19:53:09.610 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.622 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.628 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.755 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:09.834 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:09 vm01 ceph-mon[49698]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:53:09.835 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:09 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2792498751' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:09.835 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:09 vm01 ceph-mon[53908]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:53:09.835 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:09 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2792498751' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:09 vm06 ceph-mon[48187]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:53:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:09 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2792498751' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T19:53:10.390 INFO:teuthology.orchestra.run.vm01.stdout:68719476748 2026-03-09T19:53:10.390 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.2 2026-03-09T19:53:10.822 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:10.842 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:10 vm06 bash[62391]: Getting image source signatures 2026-03-09T19:53:10.842 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:10 vm06 bash[62391]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-09T19:53:10.842 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:10 vm06 bash[62391]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-09T19:53:10.842 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:10 vm06 bash[62391]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-09T19:53:11.109 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:10 vm01 ceph-mon[49698]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:11.110 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:11 vm01 ceph-mon[53908]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:11.162 INFO:teuthology.orchestra.run.vm01.stdout:34359738384 2026-03-09T19:53:11.163 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.0 2026-03-09T19:53:11.271 INFO:teuthology.orchestra.run.vm01.stdout:176093659140 2026-03-09T19:53:11.271 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.7 2026-03-09T19:53:11.298 INFO:teuthology.orchestra.run.vm01.stdout:111669149705 2026-03-09T19:53:11.298 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.4 2026-03-09T19:53:11.300 INFO:teuthology.orchestra.run.vm01.stdout:94489280523 2026-03-09T19:53:11.300 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.3 2026-03-09T19:53:11.319 INFO:teuthology.orchestra.run.vm01.stdout:51539607566 2026-03-09T19:53:11.320 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.1 2026-03-09T19:53:11.333 INFO:teuthology.orchestra.run.vm01.stdout:133143986184 2026-03-09T19:53:11.333 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.5 2026-03-09T19:53:11.336 INFO:teuthology.orchestra.run.vm01.stdout:154618822662 2026-03-09T19:53:11.336 DEBUG:teuthology.orchestra.run.vm01:> sudo 
/home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph osd last-stat-seq osd.6 2026-03-09T19:53:11.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:10 vm06 ceph-mon[48187]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:11.479 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.080 INFO:teuthology.orchestra.run.vm01.stdout:68719476748 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 bash[62391]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 bash[62391]: Writing manifest to image destination 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 podman[62391]: 2026-03-09 19:53:11.707750631 +0000 UTC m=+2.503007714 container create 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 podman[62391]: 2026-03-09 19:53:11.702146031 +0000 UTC m=+2.497403114 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 podman[62391]: 2026-03-09 19:53:11.732095553 +0000 UTC m=+2.527352625 container init 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 podman[62391]: 2026-03-09 19:53:11.734571137 +0000 UTC m=+2.529828220 container start 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T19:53:12.093 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 bash[62391]: 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.738Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.738Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" 
flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=arp 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=edac 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-09T19:53:12.094 
INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 systemd[1]: Started Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=os 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=stat 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=time 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=uname 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.739Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-09T19:53:12.094 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:53:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[62447]: ts=2026-03-09T19:53:11.740Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-09T19:53:12.125 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.273 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.283 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476748 got 68719476748 for osd.2 2026-03-09T19:53:12.283 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:12.296 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.304 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.421 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.496 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:12.614 INFO:teuthology.orchestra.run.vm01.stdout:34359738384 2026-03-09T19:53:12.694 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738384 got 34359738384 for osd.0 2026-03-09T19:53:12.694 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[49698]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[49698]: Deploying daemon prometheus.a on vm06 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2184867760' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1900023990' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[53908]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[53908]: Deploying daemon prometheus.a on vm06 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2184867760' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T19:53:12.850 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:12 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1900023990' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T19:53:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:12 vm06 ceph-mon[48187]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:12 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:12 vm06 ceph-mon[48187]: Deploying daemon prometheus.a on vm06 2026-03-09T19:53:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:12 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2184867760' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T19:53:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:12 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1900023990' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T19:53:13.486 INFO:teuthology.orchestra.run.vm01.stdout:154618822662 2026-03-09T19:53:13.540 INFO:teuthology.orchestra.run.vm01.stdout:94489280523 2026-03-09T19:53:13.583 INFO:teuthology.orchestra.run.vm01.stdout:51539607566 2026-03-09T19:53:13.619 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280523 got 94489280523 for osd.3 2026-03-09T19:53:13.619 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.621 INFO:teuthology.orchestra.run.vm01.stdout:133143986184 2026-03-09T19:53:13.661 INFO:teuthology.orchestra.run.vm01.stdout:176093659140 2026-03-09T19:53:13.678 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607566 got 51539607566 for osd.1 2026-03-09T19:53:13.678 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.713 INFO:tasks.cephadm.ceph_manager.ceph:need seq 154618822662 got 154618822662 for osd.6 2026-03-09T19:53:13.713 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.714 INFO:teuthology.orchestra.run.vm01.stdout:111669149705 2026-03-09T19:53:13.814 INFO:tasks.cephadm.ceph_manager.ceph:need seq 133143986184 got 133143986184 for osd.5 2026-03-09T19:53:13.814 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.850 INFO:tasks.cephadm.ceph_manager.ceph:need seq 111669149705 got 111669149705 for osd.4 2026-03-09T19:53:13.850 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.860 INFO:tasks.cephadm.ceph_manager.ceph:need seq 176093659140 got 176093659140 for osd.7 2026-03-09T19:53:13.860 DEBUG:teuthology.parallel:result is None 2026-03-09T19:53:13.860 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-09T19:53:13.861 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph pg dump --format=json 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3629619197' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3828560864' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2354587587' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1563588158' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2103468058' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:14.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:13 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/552916720' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T19:53:14.092 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3629619197' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3828560864' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2354587587' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1563588158' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2103468058' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:14.123 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/552916720' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3629619197' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3828560864' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2354587587' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1563588158' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2103468058' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:14.124 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:13 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/552916720' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T19:53:14.453 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:14.457 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-09T19:53:14.503 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-09T19:53:13.650999+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48540,"kb_used_data":4508,"kb_used_omap":0,"kb_used_meta":43968,"kb_avail":167690852,"statfs":{"total":171765137408,"available":171715432448,"internally_reserved":0,"allocated":4616192,"data_stored":2589083,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45023232},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flu
sh":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.967698"},"pg_stats":[{"pgid":"1.0","version":"44'87","reported_seq":56,"reported_epoch":44,"state":"active+clean","last_fresh":"2026-03-09T19:53:03.733380+0000","last_change":"2026-03-09T19:52:58.280732+0000","last_active":"2026-03-09T19:53:03.733380+0000","last_peered":"2026-03-09T19:53:03.733380+0000","last_clean":"2026-03-09T19:53:03.733380+0000","last_became_active":"2026-03-09T19:52:57.973848+0000","last_became_peered":"2026-03-09T19:52:57.973848+0000","last_unstale":"2026-03-09T19:53:03.733380+0000","last_undegraded":"2026-03-09T19:53:03.733380+0000","last_fullsized":"2026-03-09T19:53:03.733380+0000","mapping_epoch":42,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":43,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-09T19:52:15.248326+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-09T19:52:15.248326+0000","last_clean_scrub_stamp":"2026-03-09T19:52:15.248326+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T00:47:19.102860+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":41,"seq":176093659140,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":572246,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":8.4440000000000008}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":8.4209999999999994}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.8410000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.9380000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57999999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58999999999999997}]}]},{"osd":6,"up_from":36,"seq":154618822662,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":572246,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.746}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76300000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.222}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46899999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.495}]}]},{"osd":1,"up_from":12,"seq":51539607566,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6252,"kb_used_data":420,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961172,"statfs":{"total":21470642176,"available":21464240128,"internally_reserved":0,"allocated":430080,"data_stored":174721,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Mon Mar 9 19:53:08 2026","interfaces":[{"interface":"back","average":{"1min":0.52000000000000002,"5min":0.52000000000000002,"15min":0.52000000000000002},"min":{"1min":0.20899999999999999,"5min":0.20899999999999999,"15min":0.20899999999999999},"max":{"1min":2.3239999999999998,"5min":2.3239999999999998,"15min":2.3239999999999998},"last":0.76000000000000001},{"interface":"front","average":{"1min":0.57799999999999996,"5min":0.57799999999999996,"15min":0.57799999999999996},"min":{"1min":0.19900000000000001,"5min":0.19900000000000001,"15min":0.19900000000000001},"max":{"1min":2.3889999999999998,"5min":2.3889999999999998,"15min":2.3889999999999998},"last":0.80100000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.39900000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6708,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960716,"statfs":{"total":21470642176,"available":21463773184,"internally_reserved":0,"allocated":831488,"data_stored":572561,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Mon Mar 9 19:53:08 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.20599999999999999,"5min":0.20599999999999999,"15min":0.20599999999999999},"max":{"1min":1.069,"5min":1.069,"15min":1.069},"last":0.60999999999999999},{"interface":"front","average":{"1min":0.54800000000000004,"5min":0.54800000000000004,"15min":0.54800000000000004},"min":{"1min":0.29299999999999998,"5min":0.29299999999999998,"15min":0.29299999999999998},"max":{"1min":1.294,"5min":1.294,"15min":1.294},"last":1.294}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3120000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.179}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64800000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1870000000000001}]}]},{"osd":2,"up_from":16,"seq":68719476748,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6248,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961176,"statfs":{"total":21470642176,"available":21464244224,"internally_reserved":0,"allocated":425984,"data_stored":174406,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.33100000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77100000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88200000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86199999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83199999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.876}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78100000000000003}]}]},{"osd":3,"up_from":22,"seq":94489280523,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5740,"kb_used_data":420,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961684,"statfs":{"total":21470642176,"available":21464764416,"internally_reserved":0,"allocated":430080,"data_stored":174721,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52200000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56999999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61299999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58499999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54000000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57799999999999996}]}]},{"osd":4,"up_from":26,"seq":111669149705,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174091,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73799999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63100000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54800000000000004}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72699999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56100000000000005}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68899999999999995}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78900000000000003}]}]},{"osd":5,"up_from":31,"seq":133143986184,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174091,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61599999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.751}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80500000000000005}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-09T19:53:14.503 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph pg dump --format=json 2026-03-09T19:53:14.658 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config 
/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:14.999 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:15.000 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:14 vm01 ceph-mon[49698]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:15.000 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:14 vm01 ceph-mon[53908]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:15.002 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-09T19:53:15.051 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-09T19:53:13.650999+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48540,"kb_used_data":4508,"kb_used_omap":0,"kb_used_meta":43968,"kb_avail":167690852,"statfs":{"total":171765137408,"available":171715432448,"internally_reserved":0,"allocated":4616192,"data_stored":2589083,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45023232},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manife
st":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.967698"},"pg_stats":[{"pgid":"1.0","version":"44'87","reported_seq":56,"reported_epoch":44,"state":"active+clean","last_fresh":"2026-03-09T19:53:03.733380+0000","last_change":"2026-03-09T19:52:58.280732+0000","last_active":"2026-03-09T19:53:03.733380+0000","last_peered":"2026-03-09T19:53:03.733380+0000","last_clean":"2026-03-09T19:53:03.733380+0000","last_became_active":"2026-03-09T19:52:57.973848+0000","last_became_peered":"2026-03-09T19:52:57.973848+0000","last_unstale":"2026-03-09T19:53:03.733380+0000","last_undegraded":"2026-03-09T19:53:03.733380+0000","last_fullsized":"2026-03-09T19:53:03.733380+0000","mapping_epoch":42,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":43,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-09T19:52:15.248326+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-09T19:52:15.248326+0000","last_clean_scrub_stamp":"2026-03-09T19:52:15.248326+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T00:47:19.102860+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0
,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":41,"seq":176093659140,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":572246,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":8.4440000000000008}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":8.4209999999999994}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.8410000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.9380000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57999999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58999999999999997}]}]},{"osd":6,"up_from":36,"seq":154618822662,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":572246,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97999999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.746}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76300000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.222}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46899999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.495}]}]},{"osd":1,"up_from":12,"seq":51539607566,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6252,"kb_used_data":420,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961172,"statfs":{"total":21470642176,"available":21464240128,"internally_reserved":0,"allocated":430080,"data_stored":174721,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Mon Mar 9 19:53:08 
2026","interfaces":[{"interface":"back","average":{"1min":0.52000000000000002,"5min":0.52000000000000002,"15min":0.52000000000000002},"min":{"1min":0.20899999999999999,"5min":0.20899999999999999,"15min":0.20899999999999999},"max":{"1min":2.3239999999999998,"5min":2.3239999999999998,"15min":2.3239999999999998},"last":0.76000000000000001},{"interface":"front","average":{"1min":0.57799999999999996,"5min":0.57799999999999996,"15min":0.57799999999999996},"min":{"1min":0.19900000000000001,"5min":0.19900000000000001,"15min":0.19900000000000001},"max":{"1min":2.3889999999999998,"5min":2.3889999999999998,"15min":2.3889999999999998},"last":0.80100000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.39900000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6708,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960716,"statfs":{"total":21470642176,"available":21463773184,"internally_reserved":0,"allocated":831488,"data_stored":572561,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Mon Mar 9 19:53:08 2026","interfaces":[{"interface":"back","average":{"1min":0.52800000000000002,"5min":0.52800000000000002,"15min":0.52800000000000002},"min":{"1min":0.20599999999999999,"5min":0.20599999999999999,"15min":0.20599999999999999},"max":{"1min":1.069,"5min":1.069,"15min":1.069},"last":0.60999999999999999},{"interface":"front","average":{"1min":0.54800000000000004,"5min":0.54800000000000004,"15min":0.54800000000000004},"min":{"1min":0.29299999999999998,"5min":0.29299999999999998,"15min":0.29299999999999998},"max":{"1min":1.294,"5min":1.294,"15min":1.294},"last":1.294}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3120000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.179}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64800000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1870000000000001}]}]},{"osd":2,"up_from":16,"seq":68719476748,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6248,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961176,"statfs":{"total":21470642176,"available":21464244224,"internally_reserved":0,"allocated":425984,"data_stored":174406,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.33100000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77100000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88200000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86199999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83199999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.876}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78100000000000003}]}]},{"osd":3,"up_from":22,"seq":94489280523,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5740,"kb_used_data":420,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961684,"statfs":{"total":21470642176,"available":21464764416,"internally_reserved":0,"allocated":430080,"data_stored":174721,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52200000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56999999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61299999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58499999999999996}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54000000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66200000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57799999999999996}]}]},{"osd":4,"up_from":26,"seq":111669149705,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174091,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73799999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63100000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54800000000000004}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72699999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56100000000000005}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68899999999999995}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78900000000000003}]}]},{"osd":5,"up_from":31,"seq":133143986184,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174091,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61599999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.751}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56699999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80500000000000005}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-09T19:53:15.051 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-09T19:53:15.052 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-09T19:53:15.052 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-09T19:53:15.052 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph health --format=json 2026-03-09T19:53:15.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:14 vm06 ceph-mon[48187]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:15.215 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/mon.a/config 2026-03-09T19:53:15.585 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:53:15.585 INFO:teuthology.orchestra.run.vm01.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-09T19:53:15.657 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-09T19:53:15.657 INFO:tasks.cephadm:Setup complete, yielding 2026-03-09T19:53:15.657 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T19:53:15.659 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm01.local 2026-03-09T19:53:15.659 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- bash -c 'ceph config set mgr mgr/cephadm/use_repo_digest false --force' 2026-03-09T19:53:15.840 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[49698]: from='client.24421 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:15.840 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[49698]: from='client.24434 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:15.840 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/2301735196' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T19:53:15.841 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[53908]: from='client.24421 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:15.841 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[53908]: from='client.24434 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:15.841 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:15 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2301735196' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T19:53:16.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:15 vm06 ceph-mon[48187]: from='client.24421 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:16.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:15 vm06 ceph-mon[48187]: from='client.24434 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T19:53:16.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:15 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2301735196' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T19:53:16.190 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T19:53:16.192 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm01.local 2026-03-09T19:53:16.192 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default' 2026-03-09T19:53:16.723 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 podman[62872]: 2026-03-09 19:53:16.723677483 +0000 UTC m=+0.023035331 container create 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 podman[62872]: 2026-03-09 19:53:16.756247852 +0000 UTC m=+0.055605711 container init 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 podman[62872]: 2026-03-09 19:53:16.758973944 +0000 UTC m=+0.058331803 container start 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 bash[62872]: 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 podman[62872]: 2026-03-09 19:53:16.712775886 +0000 UTC m=+0.012133755 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:53:17.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.795Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T19:53:17.093 
INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.796Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.796Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.798Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=991ns 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." http2=false 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=58.75µs wal_replay_duration=278.872µs total_replay_duration=446.055µs 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=main.go:947 level=info msg="TSDB started" 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.799Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.812Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=12.505529ms db_storage=621ns remote_storage=1.022µs web_handler=391ns query_engine=450ns scrape=722.182µs scrape_sd=21.269µs notify=901ns notify_sd=1.412µs rules=11.557606ms 2026-03-09T19:53:17.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:16 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:16.812Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-09T19:53:17.122 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[49698]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:17.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' 2026-03-09T19:53:17.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:17.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[53908]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:17.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' 2026-03-09T19:53:17.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:17 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:17 vm06 ceph-mon[48187]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:17 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' 2026-03-09T19:53:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:17 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout: "id": "e586d07c-0e08-48d4-9c44-0b27a56d84b7", 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout: "name": "r", 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout: "current_period": "d6f7a670-52fd-4e75-a35f-282550727f6d", 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout: "epoch": 1 2026-03-09T19:53:18.192 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:53:18.228 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default' 2026-03-09T19:53:18.396 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[49698]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[49698]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3374492776' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[49698]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[53908]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[53908]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3374492776' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.397 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:18 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "id": "2edc8848-5875-4d4f-bb57-33cd954f09a4", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "name": "default", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "api_name": "default", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "is_master": "true", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "endpoints": [], 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "hostnames": [], 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "hostnames_s3website": [], 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "master_zone": "", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "zones": [], 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "placement_targets": [], 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "default_placement": "", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "realm_id": "e586d07c-0e08-48d4-9c44-0b27a56d84b7", 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "sync_policy": { 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: "groups": [] 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:18.561 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:53:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:18 vm06 ceph-mon[48187]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:53:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:18 vm06 ceph-mon[48187]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T19:53:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:18 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3374492776' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:18 vm06 ceph-mon[48187]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T19:53:18.630 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default' 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "id": "a8535b29-1a67-4458-acaa-4ff96fd38692", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "name": "z", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "domain_root": "z.rgw.meta:root", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "control_pool": "z.rgw.control", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "gc_pool": "z.rgw.log:gc", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "lc_pool": "z.rgw.log:lc", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "log_pool": "z.rgw.log", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "intent_log_pool": "z.rgw.log:intent", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "usage_log_pool": "z.rgw.log:usage", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "roles_pool": "z.rgw.meta:roles", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "reshard_pool": "z.rgw.log:reshard", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "user_keys_pool": "z.rgw.meta:users.keys", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "user_email_pool": "z.rgw.meta:users.email", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "user_swift_pool": "z.rgw.meta:users.swift", 2026-03-09T19:53:19.131 INFO:teuthology.orchestra.run.vm01.stdout: "user_uid_pool": "z.rgw.meta:users.uid", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "otp_pool": "z.rgw.otp", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "system_key": { 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "access_key": "", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "secret_key": "" 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "placement_pools": [ 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "key": "default-placement", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "val": { 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "index_pool": "z.rgw.buckets.index", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "storage_classes": { 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "STANDARD": { 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "data_pool": "z.rgw.buckets.data" 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "data_extra_pool": "z.rgw.buckets.non-ec", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "index_type": 0 
2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: ], 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "realm_id": "e586d07c-0e08-48d4-9c44-0b27a56d84b7", 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout: "notif_pool": "z.rgw.log:notif" 2026-03-09T19:53:19.132 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[53908]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[53908]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[49698]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[49698]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T19:53:19.165 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:19 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:19.204 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit' 2026-03-09T19:53:19.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:19 vm06 ceph-mon[48187]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:19.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:19 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T19:53:19.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:19 vm06 ceph-mon[48187]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T19:53:19.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:19 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:20.130 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 systemd[1]: Starting Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:20.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:20 vm06 ceph-mon[48187]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T19:53:20.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-mon[49698]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T19:53:20.629 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 podman[71903]: 2026-03-09 19:53:20.130114889 +0000 UTC m=+0.016685878 volume create 04c587618bcd3f183ca1cb52b2db15c0a66f6b7104b57cfc43d0ab7fe099fb08 2026-03-09T19:53:20.629 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 podman[71903]: 2026-03-09 19:53:20.134126697 +0000 UTC m=+0.020697676 container create 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:20.629 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 podman[71903]: 2026-03-09 19:53:20.219723369 +0000 UTC m=+0.106294358 container init 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:20.629 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 podman[71903]: 2026-03-09 19:53:20.123408178 +0000 UTC m=+0.009979177 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 podman[71903]: 2026-03-09 19:53:20.223004669 +0000 UTC m=+0.109575658 container start 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.233Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.233Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.234Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.101 port=9094 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.234Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." interval=2s 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 bash[71903]: 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 systemd[1]: Started Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.284Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.285Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.303Z caller=main.go:518 msg=Listening address=:9093 2026-03-09T19:53:20.630 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:20.303Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-09T19:53:20.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:20 vm01 ceph-mon[53908]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:21 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:21.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:21 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:22.629 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:22.235Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000280147s 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: Deploying daemon grafana.a on vm06 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T19:53:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: Deploying daemon grafana.a on vm06 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:22.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:22 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: Deploying daemon grafana.a on vm06 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:22 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3550894901' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T19:53:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:24 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T19:53:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:24 vm06 ceph-mon[48187]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T19:53:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:24 vm06 ceph-mon[48187]: pgmap v20: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:24 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[49698]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[49698]: pgmap v20: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[53908]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[53908]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[53908]: pgmap v20: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 4.0 KiB/s wr, 7 op/s 2026-03-09T19:53:24.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:24 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:25 vm06 ceph-mon[48187]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T19:53:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:25 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T19:53:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:25 vm01 ceph-mon[49698]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T19:53:25.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:25 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T19:53:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:25 vm01 ceph-mon[53908]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T19:53:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:25 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: pgmap v23: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T19:53:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:26 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: pgmap v23: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T19:53:26.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T19:53:26.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T19:53:26.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: pgmap v23: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:26.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T19:53:26.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T19:53:26.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:26 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1294175391' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T19:53:27.648 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:53:27.648 INFO:teuthology.orchestra.run.vm01.stdout: "id": "0a4664ee-d3a3-49ca-8fac-1e037f0245d5", 2026-03-09T19:53:27.648 INFO:teuthology.orchestra.run.vm01.stdout: "epoch": 1, 2026-03-09T19:53:27.648 INFO:teuthology.orchestra.run.vm01.stdout: "predecessor_uuid": "d6f7a670-52fd-4e75-a35f-282550727f6d", 2026-03-09T19:53:27.648 INFO:teuthology.orchestra.run.vm01.stdout: "sync_status": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "period_map": { 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "id": "0a4664ee-d3a3-49ca-8fac-1e037f0245d5", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "zonegroups": [ 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "id": "2edc8848-5875-4d4f-bb57-33cd954f09a4", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "name": "default", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "api_name": "default", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "is_master": "true", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "endpoints": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "hostnames": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "hostnames_s3website": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "master_zone": "a8535b29-1a67-4458-acaa-4ff96fd38692", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "zones": [ 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "id": "a8535b29-1a67-4458-acaa-4ff96fd38692", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "name": "z", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "endpoints": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "log_meta": "false", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "log_data": "false", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "bucket_index_max_shards": 11, 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "read_only": "false", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "tier_type": "", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "sync_from_all": "true", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "sync_from": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "redirect_zone": "" 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: ], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "placement_targets": [ 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "name": "default-placement", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "tags": [], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "storage_classes": [ 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "STANDARD" 2026-03-09T19:53:27.649 
INFO:teuthology.orchestra.run.vm01.stdout: ] 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: ], 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "default_placement": "default-placement", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "realm_id": "e586d07c-0e08-48d4-9c44-0b27a56d84b7", 2026-03-09T19:53:27.649 INFO:teuthology.orchestra.run.vm01.stdout: "sync_policy": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "groups": [] 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: ], 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "short_zone_ids": [ 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "key": "a8535b29-1a67-4458-acaa-4ff96fd38692", 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "val": 3307526374 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: ] 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "master_zonegroup": "2edc8848-5875-4d4f-bb57-33cd954f09a4", 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "master_zone": "a8535b29-1a67-4458-acaa-4ff96fd38692", 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "period_config": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "bucket_quota": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "enabled": false, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "check_on_raw": false, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_size": -1, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_size_kb": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_objects": -1 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "user_quota": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "enabled": false, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "check_on_raw": false, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_size": -1, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_size_kb": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_objects": -1 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "user_ratelimit": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_read_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_read_bytes": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_bytes": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "enabled": false 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "bucket_ratelimit": { 2026-03-09T19:53:27.650 
INFO:teuthology.orchestra.run.vm01.stdout: "max_read_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_read_bytes": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_bytes": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "enabled": false 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "anonymous_ratelimit": { 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_read_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_ops": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_read_bytes": 0, 2026-03-09T19:53:27.650 INFO:teuthology.orchestra.run.vm01.stdout: "max_write_bytes": 0, 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: "enabled": false 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: "realm_id": "e586d07c-0e08-48d4-9c44-0b27a56d84b7", 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: "realm_name": "r", 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout: "realm_epoch": 2 2026-03-09T19:53:27.651 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:53:27.696 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000' 2026-03-09T19:53:28.217 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled rgw.foo update... 2026-03-09T19:53:28.266 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: pgmap v26: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: from='client.24496 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:28 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: pgmap v26: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: from='client.24496 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:28.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1294175391' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: pgmap v26: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 6 op/s 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: from='client.24496 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:28.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:28 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:29.858 INFO:teuthology.orchestra.run.vm01.stderr:pool 'foo' created 2026-03-09T19:53:29.901 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-09T19:53:30.079 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:29 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T19:53:30.079 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:29 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T19:53:30.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:29 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T19:53:30.379 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:30.237Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.002355731s 2026-03-09T19:53:31.314 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:53:30] "GET /metrics HTTP/1.1" 200 192180 "" "Prometheus/2.33.4" 2026-03-09T19:53:31.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:31 vm06 ceph-mon[48187]: pgmap v27: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 4.3 KiB/s rd, 384 B/s wr, 5 op/s 2026-03-09T19:53:31.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:31 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T19:53:31.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:31 vm06 ceph-mon[48187]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T19:53:31.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:31 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/3059628681' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:31 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.464 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:53:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[49698]: pgmap v27: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 4.3 KiB/s rd, 384 B/s wr, 5 op/s 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[49698]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3059628681' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[53908]: pgmap v27: 129 pgs: 32 creating+peering, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 4.3 KiB/s rd, 384 B/s wr, 5 op/s 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1848770824' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[53908]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T19:53:31.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3059628681' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:31 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T19:53:31.993 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:31 vm06 systemd[1]: Starting Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:31 vm06 podman[63156]: 2026-03-09 19:53:31.993031941 +0000 UTC m=+0.018547401 container create bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, io.k8s.display-name=Red Hat Universal Base Image 8, distribution-scope=public, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, version=8.5, summary=Grafana Container configured for Ceph mgr/dashboard integration, name=ubi8, build-date=2022-03-28T10:36:18.413762, vendor=Red Hat, Inc., maintainer=Paul Cuzner , description=Ceph Grafana Container, com.redhat.component=ubi8-container, io.openshift.tags=base rhel8, io.buildah.version=1.24.2, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, release=236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 podman[63156]: 2026-03-09 19:53:32.028917335 +0000 UTC m=+0.054432795 container init bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=ubi8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, summary=Grafana Container configured for Ceph mgr/dashboard integration, maintainer=Paul Cuzner , com.redhat.component=ubi8-container, release=236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-type=git, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, distribution-scope=public, build-date=2022-03-28T10:36:18.413762, version=8.5, io.openshift.tags=base rhel8, io.buildah.version=1.24.2, io.openshift.expose-services=) 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 podman[63156]: 2026-03-09 19:53:32.032070717 +0000 UTC m=+0.057586168 container start bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, vcs-type=git, version=8.5, summary=Grafana Container configured for Ceph mgr/dashboard integration, architecture=x86_64, build-date=2022-03-28T10:36:18.413762, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, maintainer=Paul Cuzner , name=ubi8, io.openshift.tags=base rhel8, vendor=Red Hat, Inc., com.redhat.component=ubi8-container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, io.buildah.version=1.24.2, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, io.k8s.display-name=Red Hat Universal Base Image 8, release=236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 bash[63156]: bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 podman[63156]: 2026-03-09 19:53:31.985116463 +0000 UTC m=+0.010631924 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 systemd[1]: Started Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="The state of unified alerting is still not defined. 
The decision will be made during as we run the database migrations" logger=settings 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." logger=settings 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info 
msg="App mode production" logger=settings 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create user table" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" 2026-03-09T19:53:32.253 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index user.login/user.email" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user table v1-7" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v1-7" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" 
logger=migrator id="create index IDX_temp_user_status - v1-7" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" 2026-03-09T19:53:32.254 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update org_user table charset" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-09T19:53:32.254 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: 
t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index 
dashboard_org_id_uid" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted 
dashboards" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-09T19:53:32.255 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json data column" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing 
migration" logger=migrator id="add index api_key.key" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-09T19:53:32.255 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-09T19:53:32.256 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-09T19:53:32.256 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator 
id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-09T19:53:32.256 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 
2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index 
alert_notification_state_alert_id" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-09T19:53:32.257 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:32.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:32 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:32.505 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:32 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:32.505 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-09T19:53:32.505 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" 2026-03-09T19:53:32.505 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-09T19:53:32.505 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" 2026-03-09T19:53:32.505 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag 
table v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing 
migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info 
msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add 
column external to team_member table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-09T19:53:32.506 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-09T19:53:32.506 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info 
msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" 2026-03-09T19:53:32.507 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title 
columns" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: 
t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-09T19:53:32.508 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info 
msg="Executing migration" logger=migrator id="create team role table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-09T19:53:32.508 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=339.713413ms 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-09T19:53:32.508 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-09T19:53:32.509 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-09T19:53:32.509 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-09T19:53:32.509 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. 
please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-09T19:53:32.842 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T19:53:32.842 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T19:53:32.842 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-09T19:53:32.842 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 19:53:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T19:53:32+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-09T19:53:33.408 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.531 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 
2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[49698]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:33.532 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:33 vm01 ceph-mon[53908]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' 
entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: Saving service rgw.foo spec with placement count:2 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:33 vm06 ceph-mon[48187]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T19:53:34.295 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled iscsi.foo update... 
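At this point the first rgw.foo daemon (rgw.foo.vm01.zyuelz) is being deployed on vm01 and the iscsi.foo update has been scheduled. A hedged sketch, not executed by the harness here, of how one would watch those daemons converge from any admin node:

    # Hedged sketch: observe the scheduled services settling (not run in this job).
    ceph orch ps    # per-daemon status; rgw.foo.* and iscsi.foo.* should reach "running"
    ceph -s         # overall cluster summary once the daemons register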
2026-03-09T19:53:34.358 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.581 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:34 vm01 ceph-mon[49698]: from='mgr.14400 ' 
entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T19:53:34.753 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:34.754 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:34 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T19:53:35.607 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:35 vm06 ceph-mon[48187]: from='client.24485 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:35.607 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:35 vm06 ceph-mon[48187]: Saving service iscsi.foo spec with placement count:1 2026-03-09T19:53:35.607 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:35 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:35.607 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:35 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:35.607 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:35 vm06 ceph-mon[48187]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[49698]: from='client.24485 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[49698]: Saving service iscsi.foo spec with placement count:1 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[49698]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[53908]: from='client.24485 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[53908]: Saving service iscsi.foo spec with placement count:1 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:35.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:35 vm01 ceph-mon[53908]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T19:53:36.855 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:36.855 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:36.855 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:36.855 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[49698]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 6.0 KiB/s wr, 30 op/s 2026-03-09T19:53:36.856 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:36.856 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:36.856 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:36.856 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:36 vm01 ceph-mon[53908]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 6.0 KiB/s wr, 30 op/s 2026-03-09T19:53:36.931 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:36 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:36.931 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:36 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:36.931 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:36 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:36.931 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:36 vm06 ceph-mon[48187]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 6.0 KiB/s wr, 30 
op/s 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.033 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' 
entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-09T19:53:38.034 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:37 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", 
"osd", "allow rwx"]}]': finished 2026-03-09T19:53:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:37 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:39.038 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.0 KiB/s rd, 5.8 KiB/s wr, 29 op/s 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: Deploying daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:39.039 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:38 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.0 KiB/s rd, 5.8 KiB/s wr, 29 op/s 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: Deploying daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 5.0 KiB/s rd, 5.8 KiB/s wr, 29 op/s 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: Deploying daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:39.076 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:38 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:39 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/4436963' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:53:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:39 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1443884196' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:39 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:39 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/4436963' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1443884196' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/4436963' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1443884196' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]: dispatch 2026-03-09T19:53:40.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:39 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:40.490 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 systemd[1]: Stopping Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:40.490 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 bash[75003]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager.a" found: no such container 2026-03-09T19:53:40.490 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[71914]: level=info ts=2026-03-09T19:53:40.475Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75011]: 2026-03-09 19:53:40.489588056 +0000 UTC m=+0.030307801 container died 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75011]: 2026-03-09 19:53:40.504826655 +0000 UTC m=+0.045546400 container remove 1e24dc7f0e641a1a730e63f4eb15e49730ca547b8da0bce927d80b2f0c06899e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75011]: 2026-03-09 19:53:40.506382928 +0000 UTC m=+0.047102682 volume remove 04c587618bcd3f183ca1cb52b2db15c0a66f6b7104b57cfc43d0ab7fe099fb08 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 bash[75011]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 bash[75032]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager.a" found: no such container 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service: Deactivated successfully. 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 systemd[1]: Stopped Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 systemd[1]: Starting Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75073]: 2026-03-09 19:53:40.664509377 +0000 UTC m=+0.045114481 volume create de6c0e601f713dc9a6ae40700acd6248547aff6de45ef641c613c21e3309d80b 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75073]: 2026-03-09 19:53:40.66767942 +0000 UTC m=+0.048284524 container create cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75073]: 2026-03-09 19:53:40.705614634 +0000 UTC m=+0.086219748 container init cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75073]: 2026-03-09 19:53:40.70932631 +0000 UTC m=+0.089931414 container start cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 bash[75073]: cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 podman[75073]: 2026-03-09 19:53:40.629963696 +0000 UTC m=+0.010568810 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 systemd[1]: Started Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.733Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.733Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.734Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.101 port=9094 2026-03-09T19:53:40.763 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.735Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:53:40] "GET /metrics HTTP/1.1" 200 197460 "" "Prometheus/2.33.4" 2026-03-09T19:53:41.031 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.761Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:53:41.031 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.763Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:53:41.031 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.764Z caller=main.go:518 msg=Listening address=:9093 2026-03-09T19:53:41.031 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:40.764Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 4.1 KiB/s rd, 4.8 KiB/s wr, 24 op/s 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]': finished 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:53:41.031 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: Reconfiguring daemon alertmanager.a on vm01 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]: dispatch 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 4.1 KiB/s rd, 4.8 KiB/s wr, 24 op/s 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]': finished 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: Reconfiguring daemon alertmanager.a on vm01 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]: dispatch 2026-03-09T19:53:41.032 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 4.1 KiB/s rd, 4.8 KiB/s wr, 24 op/s 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/188615391"}]': finished 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: Reconfiguring daemon alertmanager.a on vm01 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]: dispatch 2026-03-09T19:53:41.050 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 systemd[1]: Stopping Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 bash[65171]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus.a" found: no such container 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.189Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.190Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.192Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T19:53:41.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.192Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[62883]: ts=2026-03-09T19:53:41.192Z caller=main.go:1066 level=info msg="See you next time!" 
2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65178]: 2026-03-09 19:53:41.195159384 +0000 UTC m=+0.021491866 container died 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65178]: 2026-03-09 19:53:41.214740051 +0000 UTC m=+0.041072543 container remove 09e75a279884d690ee18628a0f2ae4d4c8928a32f8efa2e305941deba0798749 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 bash[65178]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 bash[65195]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus.a" found: no such container 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Deactivated successfully. 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 systemd[1]: Stopped Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:53:41.343 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:53:41.621 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65239]: 2026-03-09 19:53:41.365702047 +0000 UTC m=+0.023526534 container create 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:41.621 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65239]: 2026-03-09 19:53:41.424518439 +0000 UTC m=+0.082342926 container init 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:41.621 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65239]: 2026-03-09 19:53:41.431193573 +0000 UTC m=+0.089018050 container start 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 bash[65239]: 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 podman[65239]: 2026-03-09 19:53:41.356914861 +0000 UTC m=+0.014739358 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
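cephadm has now stopped and restarted both monitoring daemons it flagged as having changed dependencies (alertmanager.a on vm01 and prometheus.a on vm06), each under a per-fsid systemd unit. A hedged sketch of checking those units directly on the hosts; the unit names follow the ceph-<fsid>@<daemon>.service pattern visible in the systemd messages above, and these checks are not part of the captured run:

    # Hedged sketch; run on the host that carries each daemon.
    sudo systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service        # on vm06
    sudo journalctl -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service -n 50   # on vm01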
2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.466Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.467Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.468Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.469Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.469Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.272µs 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.469Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T19:53:41.622 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:41.473Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." http2=false 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: Reconfiguring prometheus.a (dependencies changed)... 
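While cephadm reconfigures prometheus.a, mgr.y also re-points the dashboard module at the restarted monitoring endpoints; the dispatches in the entries that follow amount to the commands below, reproduced as a hedged summary with the URLs taken from those log lines:

    # Hedged summary of the dashboard reconfiguration dispatched by mgr.y in the following entries.
    ceph dashboard set-alertmanager-api-host http://192.168.123.101:9093
    ceph dashboard set-prometheus-api-host http://192.168.123.106:9095
    ceph dashboard set-grafana-api-url https://192.168.123.106:3000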
2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]': finished 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: 
from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:42.309 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:42 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]': finished 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2186673726' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1206756272"}]': finished 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:53:42.352 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:42 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:53:42.881 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.881Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1 2026-03-09T19:53:42.881 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.881Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1 2026-03-09T19:53:42.881 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.881Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=18.054µs wal_replay_duration=1.412485053s total_replay_duration=1.412511082s 2026-03-09T19:53:43.106 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:42.735Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000427345s 2026-03-09T19:53:43.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.884Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T19:53:43.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.884Z caller=main.go:947 level=info msg="TSDB started" 2026-03-09T19:53:43.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.884Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T19:53:43.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.897Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=13.011754ms db_storage=531ns remote_storage=1.142µs web_handler=290ns query_engine=661ns scrape=564.117µs scrape_sd=17.703µs notify=14.667µs notify_sd=4.759µs rules=12.182601ms 2026-03-09T19:53:43.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 19:53:42 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T19:53:42.897Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 187 KiB/s rd, 682 B/s wr, 333 op/s 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]': finished 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1810808929' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:43 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:43.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:43.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:43.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 187 KiB/s rd, 682 B/s wr, 333 op/s 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]': finished 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1810808929' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.101:9093"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.106:9095"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.106:3000"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 187 KiB/s rd, 682 B/s wr, 333 op/s 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2310083507' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2915987933"}]': finished 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1810808929' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]: dispatch 2026-03-09T19:53:43.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:43 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]': finished 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]: dispatch 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]': finished 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]: dispatch 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:44 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/960219321"}]': finished 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: Checking dashboard <-> RGW credentials 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]: dispatch 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:44 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:53:45.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:45 vm06 ceph-mon[48187]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 281 KiB/s rd, 1023 B/s wr, 500 op/s 2026-03-09T19:53:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:45 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]': finished 2026-03-09T19:53:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:45 vm06 ceph-mon[48187]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T19:53:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:45 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3083244995' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:45 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[49698]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 281 KiB/s rd, 1023 B/s wr, 500 op/s 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]': finished 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[49698]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3083244995' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[53908]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 58 MiB used, 160 GiB / 160 GiB avail; 281 KiB/s rd, 1023 B/s wr, 500 op/s 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/292541543' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/884642165"}]': finished 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[53908]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3083244995' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:45 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]: dispatch 2026-03-09T19:53:46.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:46 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]': finished 2026-03-09T19:53:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:46 vm06 ceph-mon[48187]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T19:53:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:46 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:46 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3144249544' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]': finished 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[49698]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3144249544' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/413365033"}]': finished 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[53908]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:46.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:46 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3144249544' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]: dispatch 2026-03-09T19:53:47.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:47 vm06 ceph-mon[48187]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:47 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]': finished 2026-03-09T19:53:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:47 vm06 ceph-mon[48187]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T19:53:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:47 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]: dispatch 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[49698]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]': finished 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[49698]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]: dispatch 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[53908]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/413365033"}]': finished 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[53908]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T19:53:47.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:47 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]: dispatch 2026-03-09T19:53:48.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:48 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]': finished 2026-03-09T19:53:48.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:48 vm06 ceph-mon[48187]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T19:53:48.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:48 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2872943542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:48.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:48 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]': finished 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[49698]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2872943542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1294728954' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2124962822"}]': finished 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[53908]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2872943542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:48 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]: dispatch 2026-03-09T19:53:49.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:49 vm06 ceph-mon[48187]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:49 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]': finished 2026-03-09T19:53:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:49 vm06 ceph-mon[48187]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T19:53:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:49 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/543667527' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:49 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[49698]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]': finished 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[49698]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/543667527' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[53908]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/895788608"}]': finished 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[53908]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/543667527' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:49 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]: dispatch 2026-03-09T19:53:50.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:50 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:53:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:50 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]': finished 2026-03-09T19:53:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:50 vm06 ceph-mon[48187]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T19:53:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:50 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/48294561' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:50 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]': finished 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[49698]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/48294561' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/500595740"}]': finished 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[53908]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/48294561' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:50 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]: dispatch 2026-03-09T19:53:50.992 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:53:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:53:50.739Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.004106519s 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:53:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:53:50] "GET /metrics HTTP/1.1" 200 207658 "" "Prometheus/2.33.4" 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[49698]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[49698]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]': finished 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[49698]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1722393494' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[53908]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]': finished 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[53908]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:51 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1722393494' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:51 vm06 ceph-mon[48187]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:53:51.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:51 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3427283308"}]': finished 2026-03-09T19:53:51.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:51 vm06 ceph-mon[48187]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T19:53:51.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:51 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:51 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1722393494' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]: dispatch 2026-03-09T19:53:51.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:53:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:53:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:53:52.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:52 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]': finished 2026-03-09T19:53:52.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:52 vm06 ceph-mon[48187]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T19:53:52.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:52 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2641272864' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:52.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:52 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]': finished 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[49698]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2641272864' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/4101403434"}]': finished 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[53908]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2641272864' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:52.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:52 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]: dispatch 2026-03-09T19:53:53.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:53 vm06 ceph-mon[48187]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:53:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:53 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]': finished 2026-03-09T19:53:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:53 vm06 ceph-mon[48187]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T19:53:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:53 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2678095736' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:53 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:53.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[49698]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:53:53.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]': finished 2026-03-09T19:53:53.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[49698]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T19:53:53.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2678095736' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:53.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:53.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[53908]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:53:53.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3556214780"}]': finished 2026-03-09T19:53:53.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[53908]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T19:53:53.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2678095736' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:53.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:53 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]: dispatch 2026-03-09T19:53:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:54 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]': finished 2026-03-09T19:53:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:54 vm06 ceph-mon[48187]: osdmap e74: 8 total, 8 up, 8 in 2026-03-09T19:53:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:54 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1430240795' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:54 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]': finished
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[49698]: osdmap e74: 8 total, 8 up, 8 in
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1430240795' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1206756272"}]': finished
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[53908]: osdmap e74: 8 total, 8 up, 8 in
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1430240795' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:54.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:54 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]: dispatch
2026-03-09T19:53:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:55 vm06 ceph-mon[48187]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:55 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]': finished
2026-03-09T19:53:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:55 vm06 ceph-mon[48187]: osdmap e75: 8 total, 8 up, 8 in
2026-03-09T19:53:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:55 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3042040946' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:55 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[49698]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]': finished
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[49698]: osdmap e75: 8 total, 8 up, 8 in
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3042040946' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[53908]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/4101403434"}]': finished
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[53908]: osdmap e75: 8 total, 8 up, 8 in
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3042040946' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:55.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:55 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]: dispatch
2026-03-09T19:53:56.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:56 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]': finished
2026-03-09T19:53:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:56 vm06 ceph-mon[48187]: osdmap e76: 8 total, 8 up, 8 in
2026-03-09T19:53:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:56 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]': finished
2026-03-09T19:53:56.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:56 vm01 ceph-mon[49698]: osdmap e76: 8 total, 8 up, 8 in
2026-03-09T19:53:56.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:56 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2245030288"}]': finished
2026-03-09T19:53:56.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:56 vm01 ceph-mon[53908]: osdmap e76: 8 total, 8 up, 8 in
2026-03-09T19:53:57.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:57 vm06 ceph-mon[48187]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:57 vm01 ceph-mon[49698]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:57 vm01 ceph-mon[53908]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:53:58.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:58 vm01 ceph-mon[49698]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 941 B/s rd, 0 op/s
2026-03-09T19:53:58.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:58 vm01 ceph-mon[53908]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 941 B/s rd, 0 op/s
2026-03-09T19:53:59.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:58 vm06 ceph-mon[48187]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 941 B/s rd, 0 op/s
2026-03-09T19:53:59.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:53:59 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:53:59.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:53:59 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:00.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:53:59 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:00.992 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:00 vm01 ceph-mon[49698]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 796 B/s rd, 0 op/s
2026-03-09T19:54:00.992 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:00 vm01 ceph-mon[53908]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 796 B/s rd, 0 op/s
2026-03-09T19:54:01.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:00 vm06 ceph-mon[48187]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 796 B/s rd, 0 op/s
2026-03-09T19:54:01.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:00] "GET /metrics HTTP/1.1" 200 207658 "" "Prometheus/2.33.4"
2026-03-09T19:54:01.591 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:03.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:02 vm06 ceph-mon[48187]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:03.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:02 vm01 ceph-mon[49698]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:03.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:02 vm01 ceph-mon[53908]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]: dispatch
2026-03-09T19:54:04.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch
2026-03-09T19:54:04.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:04.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]': finished
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]': finished
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: osdmap e77: 8 total, 8 up, 8 in
2026-03-09T19:54:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: osdmap e77: 8 total, 8 up, 8 in
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.7", "id": [7, 4]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 2]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: osdmap e77: 8 total, 8 up, 8 in
2026-03-09T19:54:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:54:06.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:05 vm06 ceph-mon[48187]: osdmap e78: 8 total, 8 up, 8 in
2026-03-09T19:54:06.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:05 vm01 ceph-mon[49698]: osdmap e78: 8 total, 8 up, 8 in
2026-03-09T19:54:06.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:05 vm01 ceph-mon[53908]: osdmap e78: 8 total, 8 up, 8 in
2026-03-09T19:54:07.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:06 vm06 ceph-mon[48187]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:07.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:06 vm01 ceph-mon[49698]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:07.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:06 vm01 ceph-mon[53908]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:08.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:08 vm01 ceph-mon[49698]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:08.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:08 vm01 ceph-mon[53908]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:09.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:08 vm06 ceph-mon[48187]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:09.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:09 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:09 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:10.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:09 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:10.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:10 vm01 ceph-mon[49698]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T19:54:10.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:10 vm01 ceph-mon[53908]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T19:54:11.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:10 vm06 ceph-mon[48187]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T19:54:11.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:10] "GET /metrics HTTP/1.1" 200 207630 "" "Prometheus/2.33.4"
2026-03-09T19:54:11.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:12 vm06 ceph-mon[48187]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 50 B/s, 0 objects/s recovering
2026-03-09T19:54:13.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:12 vm01 ceph-mon[49698]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 50 B/s, 0 objects/s recovering
2026-03-09T19:54:13.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:12 vm01 ceph-mon[53908]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 50 B/s, 0 objects/s recovering
2026-03-09T19:54:15.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:14 vm06 ceph-mon[48187]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 40 B/s, 0 objects/s recovering
2026-03-09T19:54:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:14 vm01 ceph-mon[49698]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 40 B/s, 0 objects/s recovering
2026-03-09T19:54:15.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:14 vm01 ceph-mon[53908]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 40 B/s, 0 objects/s recovering
2026-03-09T19:54:17.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:16 vm06 ceph-mon[48187]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 937 B/s rd, 0 op/s; 36 B/s, 0 objects/s recovering
2026-03-09T19:54:17.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:16 vm01 ceph-mon[49698]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 937 B/s rd, 0 op/s; 36 B/s, 0 objects/s recovering
2026-03-09T19:54:17.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:16 vm01 ceph-mon[53908]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 937 B/s rd, 0 op/s; 36 B/s, 0 objects/s recovering
2026-03-09T19:54:18.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:18 vm01 ceph-mon[49698]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:18.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:18 vm01 ceph-mon[53908]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:19.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:18 vm06 ceph-mon[48187]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:19 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:20.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:19 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:20.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:19 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:20 vm06 ceph-mon[48187]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:21.129 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:20] "GET /metrics HTTP/1.1" 200 207609 "" "Prometheus/2.33.4"
2026-03-09T19:54:21.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:20 vm01 ceph-mon[49698]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:21.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:20 vm01 ceph-mon[53908]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:21.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:21] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:22 vm06 ceph-mon[48187]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:23.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:22 vm01 ceph-mon[49698]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:23.157 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:22 vm01 ceph-mon[53908]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 33 B/s, 0 objects/s recovering
2026-03-09T19:54:25.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:24 vm06 ceph-mon[48187]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:25.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:24 vm01 ceph-mon[49698]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:25.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:24 vm01 ceph-mon[53908]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:27.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:26 vm06 ceph-mon[48187]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:27.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:26 vm01 ceph-mon[49698]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:27.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:26 vm01 ceph-mon[53908]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:28.941 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:28 vm01 ceph-mon[49698]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:28.941 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:28 vm01 ceph-mon[53908]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:28 vm06 ceph-mon[48187]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:30.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:29 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:30.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:29 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:30.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:29 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:30.992 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:30 vm01 ceph-mon[49698]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:30.992 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:30 vm01 ceph-mon[53908]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:31.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:30 vm06 ceph-mon[48187]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:31.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:30] "GET /metrics HTTP/1.1" 200 207609 "" "Prometheus/2.33.4"
2026-03-09T19:54:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:33.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:32 vm06 ceph-mon[48187]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:33.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:32 vm01 ceph-mon[49698]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:33.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:32 vm01 ceph-mon[53908]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:34 vm06 ceph-mon[48187]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:34 vm01 ceph-mon[49698]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:35.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:34 vm01 ceph-mon[53908]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:37.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:36 vm06 ceph-mon[48187]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:36 vm01 ceph-mon[49698]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:36 vm01 ceph-mon[53908]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:38.950 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:38 vm01 ceph-mon[49698]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:38.950 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:38 vm01 ceph-mon[53908]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:39.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:38 vm06 ceph-mon[48187]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:40.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:39 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:39 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:39 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:40.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:40 vm01 ceph-mon[49698]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:40.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:40 vm01 ceph-mon[53908]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:41.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:40 vm06 ceph-mon[48187]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:41.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:40] "GET /metrics HTTP/1.1" 200 207601 "" "Prometheus/2.33.4"
2026-03-09T19:54:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:42 vm06 ceph-mon[48187]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:43.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:42 vm01 ceph-mon[49698]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:43.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:42 vm01 ceph-mon[53908]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:43.756 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:43 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:54:43.756 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:43 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:54:43.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:43 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:54:43.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:43 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:54:43.784 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:43 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T19:54:43.784 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:43 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T19:54:44.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:44 vm06 ceph-mon[48187]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:44.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:44 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:44.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:44 vm01 ceph-mon[53908]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:44.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:44 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:44.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:44 vm01 ceph-mon[49698]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:44.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:44 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:45 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:45 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:45 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:45 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:45 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:46.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:45 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y'
2026-03-09T19:54:47.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:47 vm06 ceph-mon[48187]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:47.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:47 vm01 ceph-mon[49698]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:47.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:47 vm01 ceph-mon[53908]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:48.958 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:48 vm01 ceph-mon[49698]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:48.958 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:48 vm01 ceph-mon[53908]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:49.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:48 vm06 ceph-mon[48187]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:50.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:49 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:50.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:49 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:50.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:49 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:54:50.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:50 vm01 ceph-mon[49698]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:50.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:50 vm01 ceph-mon[53908]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:51.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:50 vm06 ceph-mon[48187]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:51.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:54:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:50] "GET /metrics HTTP/1.1" 200 207599 "" "Prometheus/2.33.4"
2026-03-09T19:54:51.591 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:54:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:54:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:54:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:52 vm06 ceph-mon[48187]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:53.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:52 vm01 ceph-mon[49698]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:53.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:52 vm01 ceph-mon[53908]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:53.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:54:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:54:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:54:53.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:54:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:54:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:54:53.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:54:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:54:53.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:54:53.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:54:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:54:53.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:54:55.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:54 vm06 ceph-mon[48187]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:55.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:54 vm01 ceph-mon[49698]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:55.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:54 vm01 ceph-mon[53908]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:57.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:56 vm06 ceph-mon[48187]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:57.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:56 vm01 ceph-mon[49698]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:57.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:56 vm01 ceph-mon[53908]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:54:58.968 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:58 vm01 ceph-mon[49698]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:58.968 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:58 vm01 ceph-mon[53908]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:54:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:58 vm06 ceph-mon[48187]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:54:59 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:54:59 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:54:59 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:00.992 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:00 vm01 ceph-mon[49698]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:00.992 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:00 vm01 ceph-mon[53908]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:00 vm06 ceph-mon[48187]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:01.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:00] "GET /metrics HTTP/1.1" 200 207599 "" "Prometheus/2.33.4"
2026-03-09T19:55:01.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:55:03.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:02 vm06 ceph-mon[48187]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:03.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:02 vm01 ceph-mon[49698]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:03.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:02 vm01 ceph-mon[53908]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:03.504Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:03.504Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:03.505Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:03.505Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:03.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:03.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:03.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:55:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:04 vm06 ceph-mon[48187]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[49698]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[53908]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T19:55:07.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:06 vm06 ceph-mon[48187]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:07.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:06 vm01 ceph-mon[49698]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:07.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:06 vm01 ceph-mon[53908]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:08.976 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:08 vm01 ceph-mon[49698]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:08.976 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:08 vm01 ceph-mon[53908]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:09.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:08 vm06 ceph-mon[48187]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:09 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:10.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:09 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:10.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:09 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T19:55:10.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:10 vm01 ceph-mon[49698]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:10.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:10 vm01 ceph-mon[53908]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:11.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:10 vm06 ceph-mon[48187]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:11.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:10] "GET /metrics HTTP/1.1" 200 207607 "" "Prometheus/2.33.4"
2026-03-09T19:55:11.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T19:55:13.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:12 vm06 ceph-mon[48187]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:13.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:12 vm01 ceph-mon[49698]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:13.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:12 vm01 ceph-mon[53908]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:13.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:13.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:13.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:13.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:13.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:55:13.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:13.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:13.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:13.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs"
2026-03-09T19:55:13.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:13.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs"
2026-03-09T19:55:15.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:14 vm06 ceph-mon[48187]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:14 vm01 ceph-mon[49698]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:15.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:14 vm01 ceph-mon[53908]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:17.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:16 vm06 ceph-mon[48187]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:17.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:16 vm01 ceph-mon[49698]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:17.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:16 vm01 ceph-mon[53908]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T19:55:18.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:18 vm01 ceph-mon[49698]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:18.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:18 vm01 ceph-mon[53908]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T19:55:19.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:18 vm06 ceph-mon[48187]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160
GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:19 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:20.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:19 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:20.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:19 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:20.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:20 vm01 ceph-mon[49698]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:20.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:20 vm01 ceph-mon[53908]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:20 vm06 ceph-mon[48187]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:21.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:20] "GET /metrics HTTP/1.1" 200 207631 "" "Prometheus/2.33.4" 2026-03-09T19:55:21.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:21] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:55:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:22 vm06 ceph-mon[48187]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:23.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:22 vm01 ceph-mon[49698]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:23.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:22 vm01 ceph-mon[53908]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:23.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:23.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled 
after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:23.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:23.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:23.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:23.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:25.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:24 vm06 ceph-mon[48187]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:24 vm01 ceph-mon[49698]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:24 vm01 ceph-mon[53908]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:27.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:26 vm06 ceph-mon[48187]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:27.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:26 vm01 ceph-mon[49698]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:27.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 
19:55:26 vm01 ceph-mon[53908]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:28 vm06 ceph-mon[48187]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:29.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:28 vm01 ceph-mon[49698]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:29.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:28 vm01 ceph-mon[53908]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:30.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:29 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:30.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:29 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:30.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:29 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:31.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:30 vm06 ceph-mon[48187]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:31.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:30] "GET /metrics HTTP/1.1" 200 207631 "" "Prometheus/2.33.4" 2026-03-09T19:55:31.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:30 vm01 ceph-mon[49698]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:31.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:30 vm01 ceph-mon[53908]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:55:33.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:32 vm06 ceph-mon[48187]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:33.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:32 vm01 ceph-mon[49698]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:33.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:32 vm01 ceph-mon[53908]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:33.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled 
after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:33.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:33.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:33.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:33.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:33.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:33.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:35.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:34 vm06 ceph-mon[48187]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:35.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:34 vm01 ceph-mon[49698]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:35.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:34 vm01 ceph-mon[53908]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:37.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:36 vm06 ceph-mon[48187]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:37.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:36 vm01 ceph-mon[49698]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:37.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:36 vm01 ceph-mon[53908]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:39.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:38 vm06 ceph-mon[48187]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:39.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:38 vm01 ceph-mon[49698]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:39.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:38 vm01 ceph-mon[53908]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:40.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:39 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:39 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:40.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:39 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:41.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:40 vm06 ceph-mon[48187]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:41.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:40] "GET /metrics HTTP/1.1" 200 207646 "" "Prometheus/2.33.4" 2026-03-09T19:55:41.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:40 vm01 ceph-mon[49698]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:41.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:40 vm01 ceph-mon[53908]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:55:43.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:42 vm01 ceph-mon[49698]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:43.128 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:42 vm01 ceph-mon[53908]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:42 vm06 ceph-mon[48187]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:43.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:43.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:43.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:43 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:45.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:44 vm01 ceph-mon[49698]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:45.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:44 vm01 ceph-mon[53908]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:45.241 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:44 vm06 ceph-mon[48187]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:45 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:55:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:45 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:55:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:45 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:55:46.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:45 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:55:47.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:47 vm06 ceph-mon[48187]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:47.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:47 vm01 ceph-mon[49698]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:47.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:47 vm01 ceph-mon[53908]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:49 vm01 ceph-mon[49698]: pgmap v119: 161 pgs: 
161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:49 vm01 ceph-mon[53908]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:49.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:49 vm06 ceph-mon[48187]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:50.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:50 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:50 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:50.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:50 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:55:51.299 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:55:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:50] "GET /metrics HTTP/1.1" 200 207622 "" "Prometheus/2.33.4" 2026-03-09T19:55:51.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:51 vm06 ceph-mon[48187]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:51.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:55:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:55:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:55:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:51 vm01 ceph-mon[49698]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:51 vm01 ceph-mon[53908]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:52.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:52 vm06 ceph-mon[48187]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:52.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:52 vm01 ceph-mon[49698]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:52.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:52 vm01 ceph-mon[53908]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:53.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:55:53.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:53.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:55:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:55:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:55:53.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:55:55.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:54 vm06 ceph-mon[48187]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:55.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:54 vm01 ceph-mon[49698]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:55.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:54 vm01 ceph-mon[53908]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T19:55:57.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:56 vm06 ceph-mon[48187]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:57.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:56 vm01 ceph-mon[49698]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:57.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:56 vm01 ceph-mon[53908]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:55:59.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:58 vm01 ceph-mon[49698]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:59.023 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:58 vm01 ceph-mon[53908]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:55:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:58 vm06 ceph-mon[48187]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:55:59 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:55:59 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:00.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:55:59 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:00.991 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:00 vm01 ceph-mon[49698]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:00.991 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:00 vm01 ceph-mon[53908]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:00 vm06 ceph-mon[48187]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:01.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:00] "GET /metrics HTTP/1.1" 200 207622 "" "Prometheus/2.33.4" 2026-03-09T19:56:01.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:03.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:02 vm06 ceph-mon[48187]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:03.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:02 vm01 ceph-mon[49698]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:03.128 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:02 vm01 ceph-mon[53908]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:03.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:03.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:03.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:03.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:03.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:03.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:03.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:03.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:04.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:04.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:56:05.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:04 vm06 ceph-mon[48187]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:56:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[49698]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[53908]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 
2026-03-09T19:56:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:56:07.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:06 vm06 ceph-mon[48187]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:07.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:06 vm01 ceph-mon[49698]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:07.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:06 vm01 ceph-mon[53908]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:09.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:08 vm06 ceph-mon[48187]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:09.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:08 vm01 ceph-mon[49698]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:09.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:08 vm01 ceph-mon[53908]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:10.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:09 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:10.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:09 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:10.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:09 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:11.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:10 vm06 ceph-mon[48187]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:11.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:10] "GET /metrics HTTP/1.1" 200 207614 "" "Prometheus/2.33.4" 2026-03-09T19:56:11.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:10 vm01 ceph-mon[49698]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:11.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:10 vm01 ceph-mon[53908]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:11.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:12 vm06 ceph-mon[48187]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:13.128 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:12 vm01 ceph-mon[49698]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:13.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:12 vm01 ceph-mon[53908]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:13.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:13.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:13.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:13.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:13 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:15.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:14 vm06 ceph-mon[48187]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:14 vm01 ceph-mon[49698]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:15.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:14 vm01 ceph-mon[53908]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:17.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:16 vm01 ceph-mon[49698]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:17.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:16 vm01 ceph-mon[53908]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:17.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:16 vm06 ceph-mon[48187]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:19.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:18 vm01 ceph-mon[53908]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:19.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:18 vm01 ceph-mon[49698]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:19.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:18 vm06 ceph-mon[48187]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:20.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:19 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:20.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:19 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:20.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:19 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:21.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:20] "GET /metrics HTTP/1.1" 200 207574 "" "Prometheus/2.33.4" 2026-03-09T19:56:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:20 vm01 ceph-mon[53908]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:20 vm01 ceph-mon[49698]: 
pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:21.204 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:20 vm06 ceph-mon[48187]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:21.591 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:21] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:23.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:22 vm01 ceph-mon[53908]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:23.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:22 vm01 ceph-mon[49698]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:23.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:22 vm06 ceph-mon[48187]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:23.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:23.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:23.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:23.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:24 vm01 ceph-mon[49698]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:24 vm01 ceph-mon[53908]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:25.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:24 vm06 ceph-mon[48187]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:27.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:26 vm01 ceph-mon[49698]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:27.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:26 vm01 ceph-mon[53908]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:27.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:26 vm06 ceph-mon[48187]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:29.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:28 vm06 ceph-mon[48187]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:29.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:28 vm01 ceph-mon[49698]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:29.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:28 vm01 ceph-mon[53908]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:30.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:29 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:30.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:29 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:30.378 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:29 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:31.204 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:30 vm06 ceph-mon[48187]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:31.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:30] "GET /metrics HTTP/1.1" 200 207574 "" "Prometheus/2.33.4" 2026-03-09T19:56:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:30 vm01 ceph-mon[49698]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:31.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:30 vm01 ceph-mon[53908]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:33.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:32 vm06 ceph-mon[48187]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:33.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:32 vm01 ceph-mon[49698]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:33.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:32 vm01 ceph-mon[53908]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:33.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:33.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:33.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:33.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:34.843 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force' 2026-03-09T19:56:35.077 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:34 vm01 ceph-mon[49698]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:35.077 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:34 vm01 ceph-mon[53908]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:35.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:34 vm06 ceph-mon[48187]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:35.358 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force' 2026-03-09T19:56:35.849 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 
0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force' 2026-03-09T19:56:36.340 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (2m) 112s ago 3m 16.0M - ba2b418f427c cf3634b75665 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (3m) 112s ago 3m 44.9M - 8.3.5 dad864ee21e9 bf76c513160b 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 112s ago 2m 43.7M - 3.5 e1d6a67b021e 2ed5965ab1db 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443 running (4m) 112s ago 4m 417M - 17.2.0 e1d6a67b021e de90cfa26b90 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:9283 running (5m) 112s ago 5m 466M - 17.2.0 e1d6a67b021e 4f3dd125b57b 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (5m) 112s ago 5m 56.1M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (5m) 112s ago 5m 49.4M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (5m) 112s ago 5m 50.3M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (3m) 112s ago 3m 19.7M - 1dbe0e931976 78658dbfd784 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (3m) 112s ago 3m 17.4M - 1dbe0e931976 65e409c65bb8 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (4m) 112s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (4m) 112s ago 4m 49.2M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (4m) 112s ago 4m 45.5M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (4m) 112s ago 4m 48.2M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (4m) 112s ago 4m 49.7M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (4m) 112s ago 4m 48.2M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (3m) 112s ago 3m 45.2M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (3m) 112s ago 3m 48.2M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 112s ago 3m 45.8M - 514e6a882f6e 6204e0dc9649 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 
running (3m) 112s ago 3m 88.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T19:56:36.787 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (3m) 112s ago 3m 88.5M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T19:56:36.851 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T19:56:37.042 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:36 vm01 ceph-mon[49698]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:37.042 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:36 vm01 ceph-mon[53908]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:56:37.310 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:56:37.311 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:56:37.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:36 vm06 ceph-mon[48187]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:37.379 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-09T19:56:37.859 INFO:teuthology.orchestra.run.vm01.stdout: cluster: 2026-03-09T19:56:37.859 INFO:teuthology.orchestra.run.vm01.stdout: id: 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:56:37.859 INFO:teuthology.orchestra.run.vm01.stdout: health: HEALTH_OK 2026-03-09T19:56:37.859 INFO:teuthology.orchestra.run.vm01.stdout: 
2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: services: 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: mon: 3 daemons, quorum a,c,b (age 4m) 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: mgr: y(active, since 3m), standbys: x 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: osd: 8 osds: 8 up (since 3m), 8 in (since 3m) 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: data: 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: pools: 6 pools, 161 pgs 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: objects: 209 objects, 457 KiB 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: usage: 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: pgs: 161 active+clean 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: io: 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: client: 1.2 KiB/s rd, 1 op/s rd, 0 op/s wr 2026-03-09T19:56:37.860 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:56:37.926 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls' 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[49698]: from='client.14802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2001711496' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1675193596' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[53908]: from='client.14802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2001711496' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:56:38.118 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:37 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1675193596' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:56:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:37 vm06 ceph-mon[48187]: from='client.14802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:37 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2001711496' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:56:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:37 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1675193596' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 113s ago 3m vm01=a;count:1 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 113s ago 3m vm06=a;count:1 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo 1/1 113s ago 3m count:1 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 113s ago 4m vm01=y;vm06=x;count:2 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:mon 3/3 113s ago 5m vm01:192.168.123.101=a;vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;vm06:192.168.123.106=b;count:3 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 113s ago 3m vm01=a;vm06=b;count:2 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:osd 8 113s ago - 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 113s ago 3m vm06=a;count:1 2026-03-09T19:56:38.367 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo ?:8000 2/2 113s ago 3m count:2 2026-03-09T19:56:38.416 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-09T19:56:39.081 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to redeploy mgr.x on host 'vm06' 2026-03-09T19:56:39.157 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh' 2026-03-09T19:56:39.301 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:38 vm01 ceph-mon[49698]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:39.301 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:38 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1723003123' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:56:39.302 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:38 vm01 ceph-mon[53908]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:39.302 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:38 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1723003123' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:56:39.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:38 vm06 ceph-mon[48187]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:39.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:38 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1723003123' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (2m) 114s ago 3m 16.0M - ba2b418f427c cf3634b75665 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (3m) 114s ago 3m 44.9M - 8.3.5 dad864ee21e9 bf76c513160b 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (3m) 114s ago 3m 43.7M - 3.5 e1d6a67b021e 2ed5965ab1db 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443 running (4m) 114s ago 4m 417M - 17.2.0 e1d6a67b021e de90cfa26b90 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:9283 running (5m) 114s ago 5m 466M - 17.2.0 e1d6a67b021e 4f3dd125b57b 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (5m) 114s ago 5m 56.1M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (5m) 114s ago 5m 49.4M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (5m) 114s ago 5m 50.3M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (3m) 114s ago 3m 19.7M - 1dbe0e931976 78658dbfd784 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (3m) 114s ago 3m 17.4M - 1dbe0e931976 65e409c65bb8 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (4m) 114s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (4m) 114s ago 4m 49.2M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (4m) 114s ago 4m 45.5M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (4m) 114s ago 4m 48.2M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (4m) 114s ago 4m 49.7M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (4m) 114s ago 4m 48.2M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (3m) 114s ago 3m 45.2M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (3m) 114s ago 3m 48.2M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 114s ago 3m 45.8M - 514e6a882f6e 6204e0dc9649 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (3m) 114s ago 3m 88.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T19:56:39.621 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (3m) 114s ago 3m 88.5M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T19:56:39.695 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 
shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: Schedule redeploy daemon mgr.x 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[49698]: Deploying daemon mgr.x on vm06 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", 
"image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: Schedule redeploy daemon mgr.x 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:40 vm01 ceph-mon[53908]: Deploying daemon mgr.x on vm06 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: Schedule redeploy daemon mgr.x 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:56:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:40 vm06 ceph-mon[48187]: Deploying daemon mgr.x on vm06 2026-03-09T19:56:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:40] "GET /metrics HTTP/1.1" 200 207572 "" "Prometheus/2.33.4" 2026-03-09T19:56:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:41 vm01 ceph-mon[49698]: from='client.14835 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:41 vm01 ceph-mon[49698]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:41.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:41 vm01 ceph-mon[53908]: from='client.14835 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:41.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:41 vm01 ceph-mon[53908]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:41.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:41 vm06 ceph-mon[48187]: from='client.14835 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:56:41.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:41 vm06 ceph-mon[48187]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:43.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-mon[49698]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:43.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:43 vm01 ceph-mon[53908]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:43.592 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:43 vm06 ceph-mon[48187]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:43.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:43.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:43.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:43.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:45.396 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:45 vm06 ceph-mon[48187]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:45 vm01 ceph-mon[49698]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:45 vm01 ceph-mon[53908]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:47 vm06 ceph-mon[48187]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:47.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:47 vm01 ceph-mon[49698]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:47.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:47 vm01 ceph-mon[53908]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:49 vm01 ceph-mon[49698]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:49 vm01 ceph-mon[53908]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:49 vm06 ceph-mon[48187]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:50 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:50 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:50.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:50 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:56:51.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:51 vm01 ceph-mon[49698]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:51.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:56:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:50] "GET /metrics HTTP/1.1" 200 207579 "" "Prometheus/2.33.4" 2026-03-09T19:56:51.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:51 vm01 ceph-mon[53908]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:51 vm06 ceph-mon[48187]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB 
data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:51.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:56:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:56:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:56:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:53 vm06 ceph-mon[48187]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:53.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:53.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:56:53.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:53.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 
INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:56:53.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:56:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:53 vm01 ceph-mon[49698]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:53.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:53 vm01 ceph-mon[53908]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:55 vm06 ceph-mon[48187]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:55 vm01 ceph-mon[49698]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:55 vm01 ceph-mon[53908]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:56 vm06 ceph-mon[48187]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:56.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:56 vm01 ceph-mon[49698]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:56.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:56 vm01 ceph-mon[53908]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:56:59.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:58 vm01 ceph-mon[49698]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:59.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:58 vm01 ceph-mon[53908]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:56:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:58 vm06 ceph-mon[48187]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:56:59 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:56:59 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:00.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:56:59 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
19:57:00 vm06 ceph-mon[48187]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:00] "GET /metrics HTTP/1.1" 200 207579 "" "Prometheus/2.33.4" 2026-03-09T19:57:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:00 vm01 ceph-mon[49698]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:01.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:00 vm01 ceph-mon[53908]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:01.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:57:03.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:02 vm06 ceph-mon[48187]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:03.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:02 vm01 ceph-mon[49698]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:03.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:02 vm01 ceph-mon[53908]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:03.770 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:03.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:03.771 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:03.771 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:03.771 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:03.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:03.771 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:03.771 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:57:05.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:04 vm06 ceph-mon[48187]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:04 vm01 ceph-mon[49698]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:04 vm01 ceph-mon[53908]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:07.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:06 vm06 ceph-mon[48187]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:07.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:06 vm01 ceph-mon[49698]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:07.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:06 vm01 ceph-mon[53908]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:09.081 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:08 vm01 ceph-mon[49698]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:09.081 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:08 vm01 ceph-mon[53908]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:09.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:08 vm06 ceph-mon[48187]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:09 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:10.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:09 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:10.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:09 vm01 ceph-mon[53908]: 
from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:11.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:10 vm06 ceph-mon[48187]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:11.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:10] "GET /metrics HTTP/1.1" 200 207580 "" "Prometheus/2.33.4" 2026-03-09T19:57:11.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:10 vm01 ceph-mon[49698]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:11.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:10 vm01 ceph-mon[53908]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:11.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[49210]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:57:13.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:12 vm01 ceph-mon[49698]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:13.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:12 vm01 ceph-mon[53908]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:12 vm06 ceph-mon[48187]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:13.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": 
x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:13.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:13.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:15 vm01 ceph-mon[49698]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:15 vm01 ceph-mon[53908]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:15.435 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:15 vm06 ceph-mon[48187]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:17 vm06 ceph-mon[48187]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:17 vm01 ceph-mon[49698]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:17.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:17 vm01 ceph-mon[53908]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:19.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:19 vm01 ceph-mon[49698]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:19.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:19 vm01 ceph-mon[53908]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:20.092 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:19 vm06 ceph-mon[48187]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:20.806 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:20 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:20.806 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:20 vm06 ceph-mon[48187]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:20 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:20 vm01 ceph-mon[49698]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:20 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:20 vm01 ceph-mon[53908]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:20 vm06 systemd[1]: Stopping Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:20 vm06 bash[66285]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr.x" found: no such container 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:20 vm06 podman[66292]: 2026-03-09 19:57:20.984725057 +0000 UTC m=+0.045314423 container died de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, ceph=True, GIT_BRANCH=HEAD, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , io.openshift.expose-services=, vcs-type=git, RELEASE=HEAD, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.buildah.version=1.19.8, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream) 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 podman[66292]: 2026-03-09 19:57:21.010006666 +0000 UTC m=+0.070596042 container remove de90cfa26b90b23729ca95c1956961184fbe2a25bfc50e6cc5e34cdf4e07c0d2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, version=8, ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, distribution-scope=public, GIT_CLEAN=True, io.buildah.version=1.19.8, io.openshift.expose-services=, release=754) 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 bash[66292]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 bash[66312]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr.x" found: no such container 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Failed with result 'exit-code'. 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: Stopped Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:57:21.207 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Consumed 11.159s CPU time. 
2026-03-09T19:57:21.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:20] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: Starting Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 podman[66394]: 2026-03-09 19:57:21.315517807 +0000 UTC m=+0.018682237 container create 0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223) 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 podman[66394]: 2026-03-09 19:57:21.354291277 +0000 UTC m=+0.057455698 container init 0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 podman[66394]: 2026-03-09 19:57:21.356915369 +0000 UTC m=+0.060079789 container start 0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3) 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 bash[66394]: 
0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 podman[66394]: 2026-03-09 19:57:21.308081499 +0000 UTC m=+0.011245939 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T19:57:21.471 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 systemd[1]: Started Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:57:21.790 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:21 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:21.789Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:21.797 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:21.474+0000 7f751fe55140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:57:21.797 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:21.518+0000 7f751fe55140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:57:22.092 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:21.956+0000 7f751fe55140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:22 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:22 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:22 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:22 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:22 vm06 ceph-mon[48187]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:22.319+0000 7f751fe55140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:57:22.555 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 
2026-03-09T19:57:22.556 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T19:57:22.556 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: from numpy import show_config as show_numpy_config 2026-03-09T19:57:22.556 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:22.420+0000 7f751fe55140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:57:22.556 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:22.465+0000 7f751fe55140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:57:22.556 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:22 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:22.548+0000 7f751fe55140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[49698]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:57:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:22 vm01 ceph-mon[53908]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:23.046 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:22.751Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:23.254 
INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.126+0000 7f751fe55140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:57:23.254 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.252+0000 7f751fe55140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:57:23.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.300+0000 7f751fe55140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T19:57:23.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.344+0000 7f751fe55140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:57:23.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.404+0000 7f751fe55140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:57:23.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.449+0000 7f751fe55140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:23.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:23.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:23.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:23.515Z caller=notify.go:724 
component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:23.811 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:23.928 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:23 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:23.928 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:23 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:23.928 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:23 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:23.928 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.640+0000 7f751fe55140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:57:23.928 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.691+0000 7f751fe55140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:23 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:57:24.231 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:23 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:23.927+0000 7f751fe55140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T19:57:24.512 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.230+0000 7f751fe55140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:57:24.512 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.268+0000 7f751fe55140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:57:24.512 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.311+0000 7f751fe55140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:57:24.512 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.389+0000 7f751fe55140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:57:24.512 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.429+0000 7f751fe55140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:57:24.785 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.510+0000 7f751fe55140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:57:24.785 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.639+0000 7f751fe55140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:57:25.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:24 vm06 ceph-mon[48187]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.784+0000 7f751fe55140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:24.825+0000 7f751fe55140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:57:24] ENGINE Bus STARTING 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: CherryPy Checker: 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: The Application mounted at '' has an empty config. 
2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:57:24] ENGINE Serving on http://:::9283 2026-03-09T19:57:25.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:57:24] ENGINE Bus STARTED 2026-03-09T19:57:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:24 vm01 ceph-mon[49698]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:24 vm01 ceph-mon[53908]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:25.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:25 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:25.335Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:25.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:25 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:25.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: Standby manager daemon x restarted 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: Standby manager daemon x started 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:57:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:26 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: Standby manager daemon x restarted 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: Standby manager daemon x started 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: from='mgr.? 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: Standby manager daemon x restarted 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: Standby manager daemon x started 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T19:57:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:26 vm01 ceph-mon[53908]: from='mgr.? 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:57:27.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:27 vm01 ceph-mon[49698]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:27.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:27 vm01 ceph-mon[49698]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T19:57:27.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:27 vm01 ceph-mon[53908]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:27.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:27 vm01 ceph-mon[53908]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T19:57:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:27 vm06 ceph-mon[48187]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:27 vm06 ceph-mon[48187]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T19:57:29.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:29 vm01 ceph-mon[49698]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:29.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:29 vm01 ceph-mon[53908]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:29 vm06 ceph-mon[48187]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:30 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:30 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:30 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:31.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:30] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:57:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:31 vm01 ceph-mon[49698]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:31.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:31 vm01 ceph-mon[53908]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:31 vm06 ceph-mon[48187]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - 
[09/Mar/2026:19:57:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:57:33.515 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-mon[49698]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:33.515 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:33 vm01 ceph-mon[53908]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:33 vm06 ceph-mon[48187]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:35.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:35 vm06 ceph-mon[48187]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:35 vm01 ceph-mon[49698]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:35 vm01 ceph-mon[53908]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:37 vm06 ceph-mon[48187]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:37.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:37 vm01 ceph-mon[49698]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:37.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:37 vm01 ceph-mon[53908]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:39.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:39 vm01 ceph-mon[49698]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:39.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:39 vm01 ceph-mon[53908]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:39 vm06 ceph-mon[48187]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:40 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:40 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:40 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:40] "GET /metrics HTTP/1.1" 200 207574 "" "Prometheus/2.33.4" 2026-03-09T19:57:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 
19:57:41 vm01 ceph-mon[49698]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:41.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:41 vm01 ceph-mon[53908]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:41.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:41 vm06 ceph-mon[48187]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:57:43.515 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:43 vm01 ceph-mon[53908]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:43.515 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-mon[49698]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:43.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:43 vm06 ceph-mon[48187]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:43.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:43.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:43.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:45.542 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:45 vm06 ceph-mon[48187]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:45 vm01 ceph-mon[49698]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:45 vm01 ceph-mon[53908]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:47 vm06 ceph-mon[48187]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:47.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:47 vm01 ceph-mon[53908]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:47.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:47 vm01 ceph-mon[49698]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:49 vm01 ceph-mon[53908]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:49 vm01 ceph-mon[49698]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:49 vm06 ceph-mon[48187]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:50 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
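(The repeating Alertmanager entries above all fail for the same reason: the ceph-dashboard receiver posts to the mgr endpoints by raw IP, https://192.168.123.101:8443 and https://192.168.123.106:8443, while the dashboard's self-signed certificate contains no IP subjectAltName entries, so the Go TLS client rejects it with "doesn't contain any IP SANs". Note also that webhook[0]'s URL carries a doubled slash, 8443//api/prometheus_receiver, though the request is rejected during the TLS handshake before the path is ever used. Below is a minimal sketch of one way to confirm and clear this error on such a cluster, assuming OpenSSL 1.1.1+ on the host; the file names dashboard.key / dashboard.crt are illustrative, and these are not steps the test harness ran:

    # Inspect the SANs (if any) in the certificate the dashboard actually serves.
    openssl s_client -connect 192.168.123.101:8443 </dev/null 2>/dev/null \
        | openssl x509 -noout -ext subjectAltName

    # Re-issue a self-signed certificate that names both mgr IPs as SANs.
    openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
        -keyout dashboard.key -out dashboard.crt -subj '/CN=ceph-dashboard' \
        -addext 'subjectAltName = IP:192.168.123.101, IP:192.168.123.106'

    # Install the pair and reload the dashboard module so it serves the new cert.
    ceph dashboard set-ssl-certificate -i dashboard.crt
    ceph dashboard set-ssl-certificate-key -i dashboard.key
    ceph mgr module disable dashboard
    ceph mgr module enable dashboard

With IP SANs present, Alertmanager's next retry should validate the certificate, and the "Notify attempt failed" / "Notify for alerts failed" pairs below would stop recurring.)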
2026-03-09T19:57:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:50 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:50 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:57:51.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:57:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:50] "GET /metrics HTTP/1.1" 200 207576 "" "Prometheus/2.33.4" 2026-03-09T19:57:51.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:57:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:57:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:57:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:51 vm06 ceph-mon[48187]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:51 vm01 ceph-mon[53908]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:51 vm01 ceph-mon[49698]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:52 vm06 ceph-mon[48187]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:52 vm01 ceph-mon[53908]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:52 vm01 ceph-mon[49698]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:53.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:57:53.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:57:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:57:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:57:53.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:57:55.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:54 vm06 ceph-mon[48187]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:55.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:54 vm01 ceph-mon[49698]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:55.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:54 vm01 ceph-mon[53908]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:57.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:56 vm06 ceph-mon[48187]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:57.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:56 vm01 ceph-mon[49698]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:57.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:56 vm01 ceph-mon[53908]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:57:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:58 vm06 ceph-mon[48187]: pgmap 
v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:59.119 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:58 vm01 ceph-mon[49698]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:57:59.119 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:58 vm01 ceph-mon[53908]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:57:59 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:57:59 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:00.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:57:59 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:00 vm06 ceph-mon[48187]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:00] "GET /metrics HTTP/1.1" 200 207576 "" "Prometheus/2.33.4" 2026-03-09T19:58:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:00 vm01 ceph-mon[49698]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:01.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:00 vm01 ceph-mon[53908]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:01.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:03.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:02 vm06 ceph-mon[48187]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:03.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:02 vm01 ceph-mon[49698]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:03.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:02 vm01 ceph-mon[53908]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:03.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:03.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:03.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:03.806 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:03 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:03 vm06 ceph-mon[48187]: from='mgr.14400 
192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:03 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:03 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:58:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:04 vm06 ceph-mon[48187]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:04 vm01 ceph-mon[49698]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:04 vm01 ceph-mon[53908]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:07.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:06 vm06 ceph-mon[48187]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:07.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:06 vm01 ceph-mon[49698]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:06 vm01 ceph-mon[53908]: pgmap v188: 161 pgs: 161 
active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:09.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:08 vm06 ceph-mon[48187]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:09.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:08 vm01 ceph-mon[49698]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:09.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:08 vm01 ceph-mon[53908]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:10.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:10 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:10.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:10 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:10.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:10 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:11.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:11 vm06 ceph-mon[48187]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:11.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:10] "GET /metrics HTTP/1.1" 200 207575 "" "Prometheus/2.33.4" 2026-03-09T19:58:11.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:11 vm01 ceph-mon[49698]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:11.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:11 vm01 ceph-mon[53908]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:13 vm06 ceph-mon[48187]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:13.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-mon[49698]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:13.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:13 vm01 ceph-mon[53908]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:13.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:13.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:13.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:13.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:15 vm06 ceph-mon[48187]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:15 vm01 ceph-mon[49698]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 
0 op/s 2026-03-09T19:58:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:15 vm01 ceph-mon[53908]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:17.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:17 vm06 ceph-mon[48187]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:17.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:17 vm01 ceph-mon[49698]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:17.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:17 vm01 ceph-mon[53908]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:19.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:19 vm06 ceph-mon[48187]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:19.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:19 vm01 ceph-mon[49698]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:19.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:19 vm01 ceph-mon[53908]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:20.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:20 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:20.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:20 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:20.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:20 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:21.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:21 vm06 ceph-mon[48187]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:21.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:21] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:21.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:20] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:58:21.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:21 vm01 ceph-mon[49698]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:21.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:21 vm01 ceph-mon[53908]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:23.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-mon[49698]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:23.378 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:23 vm01 ceph-mon[53908]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:23 vm06 ceph-mon[48187]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:23.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:23.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:23.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:23.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:23.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:23.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: 
level=warn ts=2026-03-09T19:58:23.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:24.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:24 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:58:24.859 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:24 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:58:24.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:24 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:58:24.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:24 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:58:24.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:24 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:58:24.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:24 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:58:25.466 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:25 vm06 ceph-mon[48187]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:25 vm01 ceph-mon[49698]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:25 vm01 ceph-mon[53908]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[49698]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[53908]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:26 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:27.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:26 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:27.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:26 vm06 ceph-mon[48187]: pgmap v198: 161 pgs: 161 
active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:27.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:26 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:58:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:28 vm06 ceph-mon[48187]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:29.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:28 vm01 ceph-mon[49698]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:29.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:28 vm01 ceph-mon[53908]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:30.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:29 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:30.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:29 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:30.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:29 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:31.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:30] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:58:31.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:30 vm01 ceph-mon[49698]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:31.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:30 vm01 ceph-mon[53908]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:31.206 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:30 vm06 ceph-mon[48187]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:33.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:32 vm01 ceph-mon[49698]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:33.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:32 vm01 ceph-mon[53908]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:33.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:32 vm06 ceph-mon[48187]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:33.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts 
failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:33.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:33.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:33.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:33.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:33.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:35.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:34 vm06 ceph-mon[48187]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:34 vm01 ceph-mon[49698]: pgmap 
v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:35.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:34 vm01 ceph-mon[53908]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:36 vm06 ceph-mon[48187]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:37.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:36 vm01 ceph-mon[49698]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:37.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:36 vm01 ceph-mon[53908]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:39.154 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:38 vm01 ceph-mon[49698]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:39.154 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:38 vm01 ceph-mon[53908]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:39.341 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:38 vm06 ceph-mon[48187]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:39 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:40.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:39 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:40.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:39 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:41.206 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:40 vm06 ceph-mon[48187]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:40] "GET /metrics HTTP/1.1" 200 207577 "" "Prometheus/2.33.4" 2026-03-09T19:58:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:40 vm01 ceph-mon[49698]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:41.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:40 vm01 ceph-mon[53908]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:42 vm06 ceph-mon[48187]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:43.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:42 vm01 ceph-mon[49698]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:43.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:42 vm01 ceph-mon[53908]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:43.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:43.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:43.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:43.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 
19:58:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:43.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:45.301 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:44 vm06 ceph-mon[48187]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:45.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:44 vm01 ceph-mon[49698]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:45.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:44 vm01 ceph-mon[53908]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:47.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:46 vm06 ceph-mon[48187]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:47.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:46 vm01 ceph-mon[49698]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:47.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:46 vm01 ceph-mon[53908]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:48 vm06 ceph-mon[48187]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:48 vm01 ceph-mon[49698]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:48 vm01 ceph-mon[53908]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:50.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:49 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:50.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:49 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:50.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:49 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:58:51.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:50 vm06 ceph-mon[48187]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:51.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:58:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:51] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:58:51.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:58:50 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:58:50] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:58:51.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:50 vm01 ceph-mon[49698]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:51.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:50 vm01 ceph-mon[53908]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:53.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:52 vm06 ceph-mon[48187]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:53.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:52 vm01 ceph-mon[49698]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:53.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:52 vm01 ceph-mon[53908]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:53.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:58:53.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 
err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:53.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:58:53.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:58:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:58:53.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:58:55.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:55 vm06 ceph-mon[48187]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:55.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:55 vm01 ceph-mon[49698]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:55.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:55 vm01 ceph-mon[53908]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:57.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:57 vm06 ceph-mon[48187]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:57 vm01 ceph-mon[49698]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:57.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:57 vm01 ceph-mon[53908]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:58:59.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:58:59 vm06 ceph-mon[48187]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:59.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:58:59 vm01 ceph-mon[49698]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:58:59.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:58:59 vm01 ceph-mon[53908]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:00.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:00 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:00 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:00.378 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:00 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:01.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:01 vm06 ceph-mon[48187]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:01.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:01] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:01.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:00] "GET /metrics HTTP/1.1" 200 207578 "" "Prometheus/2.33.4" 2026-03-09T19:59:01.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:01 vm01 ceph-mon[49698]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:01 vm01 ceph-mon[53908]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:03 vm06 ceph-mon[48187]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-mon[49698]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:03.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:03 vm01 ceph-mon[53908]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:03.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:03.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:03.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:03.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:03.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:03.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:03 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:03.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:04 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:04 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[49698]: from='mgr.14400 
192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:04 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:05 vm06 ceph-mon[48187]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:05 vm01 ceph-mon[49698]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:05.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:05 vm01 ceph-mon[53908]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:07.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:07 vm06 ceph-mon[48187]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:07.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:07 vm01 ceph-mon[49698]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:07 vm01 ceph-mon[53908]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:09.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:09 vm06 ceph-mon[48187]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:09.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:09 vm01 ceph-mon[49698]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:09.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:09 vm01 ceph-mon[53908]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:10.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:10 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:10.378 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:10 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:10.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:10 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:11.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:11 vm06 ceph-mon[48187]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:11] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:11.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:10] "GET /metrics HTTP/1.1" 200 207572 "" "Prometheus/2.33.4" 2026-03-09T19:59:11.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:11 vm01 ceph-mon[49698]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:11.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:11 vm01 ceph-mon[53908]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:13 vm06 ceph-mon[48187]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:13.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-mon[49698]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:13.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:13 vm01 ceph-mon[53908]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:13.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:13.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:13.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:13.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:13.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:13.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:13.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:15 vm01 ceph-mon[49698]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:15 vm01 ceph-mon[53908]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:15 vm06 ceph-mon[48187]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:17.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:17 vm01 ceph-mon[49698]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:17.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:17 vm01 ceph-mon[53908]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:17 vm06 ceph-mon[48187]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:19.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:19 vm01 ceph-mon[49698]: pgmap 
v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:19.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:19 vm01 ceph-mon[53908]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:19 vm06 ceph-mon[48187]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:20.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:20 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:20.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:20 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:20.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:20 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:21.251 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:20] "GET /metrics HTTP/1.1" 200 207577 "" "Prometheus/2.33.4" 2026-03-09T19:59:21.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:21 vm06 ceph-mon[48187]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:21.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:21 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:21] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:21.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:21 vm01 ceph-mon[49698]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:21.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:21 vm01 ceph-mon[53908]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:23.522 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-mon[49698]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:23.522 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:23 vm01 ceph-mon[53908]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:23 vm06 ceph-mon[48187]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:23.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:23.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:23.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:23.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:23.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:25.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:25 vm06 ceph-mon[48187]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:25 vm01 ceph-mon[53908]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:25 vm01 ceph-mon[49698]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T19:59:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:26 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:26 vm06 ceph-mon[48187]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:26 vm06 ceph-mon[48187]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[49698]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[49698]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[53908]: from='mgr.14400 192.168.123.101:0/1450414147' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:26 vm01 ceph-mon[53908]: from='mgr.14400 ' entity='mgr.y' 2026-03-09T19:59:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:27 vm06 ceph-mon[48187]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:27.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:27 vm01 ceph-mon[49698]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:27.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:27 vm01 ceph-mon[53908]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:29 vm06 ceph-mon[48187]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:29 vm01 ceph-mon[49698]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:29 vm01 ceph-mon[53908]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:30 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:30 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:30 vm01 
ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:31.317 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:30] "GET /metrics HTTP/1.1" 200 207577 "" "Prometheus/2.33.4" 2026-03-09T19:59:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:31 vm06 ceph-mon[48187]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:31.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:31] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:31 vm01 ceph-mon[49698]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:31 vm01 ceph-mon[53908]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:32.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:32 vm06 ceph-mon[48187]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:32.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:32 vm01 ceph-mon[49698]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:32.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:32 vm01 ceph-mon[53908]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:33.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:33.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn 
ts=2026-03-09T19:59:33.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:33.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:33.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:33.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:33.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:34 vm06 ceph-mon[48187]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:35.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:34 vm01 ceph-mon[53908]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:35.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:34 vm01 ceph-mon[49698]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:37.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:36 vm06 ceph-mon[48187]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:37.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:36 vm01 ceph-mon[49698]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:37.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:36 vm01 ceph-mon[53908]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:39.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:38 vm06 ceph-mon[48187]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:39.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:38 vm01 ceph-mon[49698]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
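Just before the orchestrator status checks below, mgr.y can be seen asking the monitors for "config generate-minimal-conf" and "auth get client.admin"; this appears to be the cephadm module refreshing the minimal ceph.conf and admin keyring it distributes to managed hosts. The same two calls can be issued by hand from a cephadm shell to see what gets written out (a sketch only; cephadm performs this refresh on its own):

    # Print the minimal client configuration and the admin credential that
    # cephadm copies under /etc/ceph on managed hosts.
    ceph config generate-minimal-conf
    ceph auth get client.admin
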
2026-03-09T19:59:39.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:38 vm01 ceph-mon[53908]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:40.026 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T19:59:40.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:39 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:39 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:39 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:40.538 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (5m) 2m ago 6m 27.0M - ba2b418f427c cf3634b75665 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (6m) 74s ago 6m 46.8M - 8.3.5 dad864ee21e9 bf76c513160b 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (6m) 2m ago 6m 50.0M - 3.5 e1d6a67b021e 2ed5965ab1db 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (2m) 74s ago 7m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:9283 running (8m) 2m ago 8m 470M - 17.2.0 e1d6a67b021e 4f3dd125b57b 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (8m) 2m ago 8m 50.5M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (8m) 74s ago 8m 41.9M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (8m) 2m ago 8m 39.3M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (6m) 2m ago 6m 22.5M - 1dbe0e931976 78658dbfd784 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (6m) 74s ago 6m 18.0M - 1dbe0e931976 65e409c65bb8 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (7m) 2m ago 7m 53.1M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (7m) 2m ago 7m 50.8M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (7m) 2m ago 7m 47.0M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (7m) 2m ago 7m 49.9M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (7m) 74s ago 7m 51.8M 
4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (7m) 74s ago 7m 50.1M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (6m) 74s ago 6m 47.2M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (6m) 74s ago 6m 50.1M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 74s ago 6m 58.9M - 514e6a882f6e 6204e0dc9649 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (6m) 2m ago 6m 89.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T19:59:40.539 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (6m) 74s ago 6m 89.9M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T19:59:40.786 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T19:59:41.077 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:40 vm01 ceph-mon[49698]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:41.078 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:40] "GET /metrics HTTP/1.1" 200 207576 "" "Prometheus/2.33.4" 2026-03-09T19:59:41.078 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:40 vm01 ceph-mon[53908]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:41.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:40 vm06 ceph-mon[48187]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 14, 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T19:59:41.453 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T19:59:41.515 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-09T19:59:41.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:41 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:41] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:42.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:41 vm01 ceph-mon[49698]: from='client.14847 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:59:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:41 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/4057055633' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:41 vm01 ceph-mon[53908]: from='client.14847 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:59:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:41 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/4057055633' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: cluster: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: id: 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: health: HEALTH_OK 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: services: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: mon: 3 daemons, quorum a,c,b (age 7m) 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: mgr: y(active, since 6m), standbys: x 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: osd: 8 osds: 8 up (since 6m), 8 in (since 6m) 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: data: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: pools: 6 pools, 161 pgs 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: objects: 209 objects, 457 KiB 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: usage: 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: pgs: 161 active+clean 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: io: 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr 2026-03-09T19:59:42.202 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-09T19:59:42.289 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T19:59:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:41 vm06 ceph-mon[48187]: from='client.14847 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T19:59:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:41 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/4057055633' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:42.959 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T19:59:43.099 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 2'"'"'' 2026-03-09T19:59:43.265 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:42 vm01 ceph-mon[53908]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:43.265 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:42 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1056774472' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:59:43.266 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:42 vm01 ceph-mon[49698]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:43.266 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:42 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1056774472' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:59:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:42 vm06 ceph-mon[48187]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:42 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1056774472' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T19:59:43.661 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T19:59:43.713 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:43.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:43.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:43.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:43.528Z caller=notify.go:724 component=dispatcher 
receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:43.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:43.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:43.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:44.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:43 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/224079064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T19:59:44.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:43 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3613301466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/224079064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T19:59:44.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:43 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3613301466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:43 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/224079064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T19:59:44.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:43 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3613301466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T19:59:45.002 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T19:59:45.165 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setuser ceph since I am not root 2026-03-09T19:59:45.165 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ignoring --setgroup ceph since I am not root 2026-03-09T19:59:45.165 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:45.095+0000 7f4837ec9000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[49698]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3628186547' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[49698]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[53908]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3628186547' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T19:59:45.166 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:44 vm01 ceph-mon[53908]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T19:59:45.212 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:44 vm06 ceph-mon[48187]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T19:59:45.212 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:44 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/3628186547' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T19:59:45.212 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:44 vm06 ceph-mon[48187]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T19:59:45.212 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:44] ENGINE Bus STOPPING 2026-03-09T19:59:45.212 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:45] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T19:59:45.213 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:45] ENGINE Bus STOPPED 2026-03-09T19:59:45.436 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:45.164+0000 7f4837ec9000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T19:59:45.436 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:45.343Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": dial tcp 192.168.123.101:8443: connect: connection refused" 2026-03-09T19:59:45.537 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:45] ENGINE Bus STARTING 2026-03-09T19:59:45.537 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:45] ENGINE Serving on http://:::9283 2026-03-09T19:59:45.537 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:45 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:19:59:45] ENGINE Bus STARTED 2026-03-09T19:59:45.692 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:45.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": dial tcp 192.168.123.101:8443: connect: connection refused" 2026-03-09T19:59:46.011 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:45.685+0000 7f4837ec9000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T19:59:46.012 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:45.745Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": dial tcp 192.168.123.106:8443: connect: connection refused" 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/3628186547' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: mgrmap e22: x(active, starting, since 0.6511s) 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: Manager 
daemon x is now available 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: Queued rgw.foo for migration 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T19:59:46.012 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: Checking for cert/key for grafana.a 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.013 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.018 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3628186547' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T19:59:46.018 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: mgrmap e22: x(active, starting, since 0.6511s) 2026-03-09T19:59:46.018 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:59:46.018 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: Manager 
daemon x is now available 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: Queued rgw.foo for migration 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: Checking for cert/key for grafana.a 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.019 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:45 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.278 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:46.126+0000 7f4837ec9000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/3628186547' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: mgrmap e22: x(active, starting, since 0.6511s) 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: Manager 
daemon x is now available 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: Queued rgw.foo for migration 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: Checking for cert/key for grafana.a 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:45 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T19:59:46.551 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:46.277+0000 7f4837ec9000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T19:59:46.551 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:46.361+0000 7f4837ec9000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T19:59:46.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:46.550+0000 7f4837ec9000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: Deploying cephadm binary to vm06 2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: mgrmap e23: x(active, since 1.68918s) 
2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: Deploying cephadm binary to vm01 2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.222 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:46 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.254 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:46.926Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=5 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: Deploying cephadm binary to vm06 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: mgrmap e23: x(active, since 1.68918s) 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: Deploying cephadm binary to vm01 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.254 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: Deploying cephadm binary to vm06 2026-03-09T19:59:47.255 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: mgrmap e23: x(active, since 1.68918s) 2026-03-09T19:59:47.255 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: Deploying cephadm binary to vm01 2026-03-09T19:59:47.255 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:47.255 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.255 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:46 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:47.515 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.253+0000 7f4837ec9000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T19:59:47.515 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.511+0000 7f4837ec9000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:59:47.808 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.583+0000 7f4837ec9000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 
2026-03-09T19:59:47.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.661+0000 7f4837ec9000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T19:59:47.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.739+0000 7f4837ec9000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T19:59:47.809 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:47.807+0000 7f4837ec9000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T19:59:48.082 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:47 vm01 ceph-mon[49698]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:48.082 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:47 vm01 ceph-mon[49698]: mgrmap e24: x(active, since 3s) 2026-03-09T19:59:48.082 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:47 vm01 ceph-mon[53908]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:48.082 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:47 vm01 ceph-mon[53908]: mgrmap e24: x(active, since 3s) 2026-03-09T19:59:48.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:47 vm06 ceph-mon[48187]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:48.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:47 vm06 ceph-mon[48187]: mgrmap e24: x(active, since 3s) 2026-03-09T19:59:48.386 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:48 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:48.196+0000 7f4837ec9000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T19:59:48.386 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:48 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:48.292+0000 7f4837ec9000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T19:59:49.288 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:48 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:48.995+0000 7f4837ec9000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T19:59:49.288 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.082+0000 7f4837ec9000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T19:59:49.288 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.164+0000 7f4837ec9000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T19:59:49.543 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: [09/Mar/2026:19:59:48] ENGINE Bus STARTING 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: [09/Mar/2026:19:59:48] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: [09/Mar/2026:19:59:48] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: [09/Mar/2026:19:59:48] ENGINE Bus STARTED 
2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: [09/Mar/2026:19:59:48] ENGINE Client ('192.168.123.106', 40776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: [09/Mar/2026:19:59:48] ENGINE Bus STARTING 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: [09/Mar/2026:19:59:48] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: [09/Mar/2026:19:59:48] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: [09/Mar/2026:19:59:48] ENGINE Bus STARTED 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: [09/Mar/2026:19:59:48] ENGINE Client ('192.168.123.106', 40776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: 
from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.544 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:49 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.546 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.312+0000 7f4837ec9000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T19:59:49.546 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.409+0000 7f4837ec9000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T19:59:49.546 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.543+0000 7f4837ec9000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: [09/Mar/2026:19:59:48] ENGINE Bus STARTING 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: [09/Mar/2026:19:59:48] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: [09/Mar/2026:19:59:48] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: [09/Mar/2026:19:59:48] ENGINE Bus STARTED 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: [09/Mar/2026:19:59:48] ENGINE Client ('192.168.123.106', 40776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:49 vm06 ceph-mon[48187]: 
from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:49.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:49 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:49.651+0000 7f4837ec9000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T19:59:50.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:50.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:50.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: mgrmap e25: x(active, since 5s) 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: Standby manager daemon y started 2026-03-09T19:59:50.379 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:50.075+0000 7f4837ec9000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:50.141+0000 7f4837ec9000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:59:50] ENGINE Bus STARTING 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: CherryPy Checker: 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: The Application mounted at '' has an empty config. 
2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:59:50] ENGINE Serving on http://:::9283 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: [09/Mar/2026:19:59:50] ENGINE Bus STARTED 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: mgrmap e25: x(active, since 5s) 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 
ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: Standby manager daemon y started 2026-03-09T19:59:50.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T19:59:50.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:59:50.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T19:59:50.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:50 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='client.24595 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: mgrmap e25: x(active, since 5s) 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: Standby manager daemon y started 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T19:59:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:50 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2909181393' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T19:59:50.695 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: Stopping Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 podman[80405]: 2026-03-09 19:59:50.547284658 +0000 UTC m=+0.043245431 container died 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 podman[80405]: 2026-03-09 19:59:50.56448103 +0000 UTC m=+0.060441803 container remove 78658dbfd7848bf40ced65c65a6faa0b8100659d561f727dbf0b53bbeb3151d3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 bash[80405]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: Stopped Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:59:50.696 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Consumed 1.414s CPU time. 2026-03-09T19:59:51.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 19:59:50 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:50] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T19:59:51.128 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 systemd[1]: Starting Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:59:51.128 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:50 vm01 bash[80514]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Reconfiguring node-exporter.a (dependencies changed)... 
2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: Deploying daemon node-exporter.a on vm01 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:51 vm06 ceph-mon[48187]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-09T19:59:51.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 19:59:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:19:59:51] "GET /metrics HTTP/1.1" 200 34539 "" "Prometheus/2.33.4" 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Reconfiguring node-exporter.a (dependencies changed)... 
2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: Deploying daemon node-exporter.a on vm01 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[49698]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: Deploying daemon node-exporter.a on vm01 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T19:59:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:51 vm01 ceph-mon[53908]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-09T19:59:52.378 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Getting image source signatures 2026-03-09T19:59:52.378 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-09T19:59:52.378 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-09T19:59:52.378 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-09T19:59:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-mon[49698]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:52.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:52 vm01 ceph-mon[53908]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-09T19:59:52.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:52 vm06 ceph-mon[48187]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 
2026-03-09T19:59:53.131 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:52.880Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: Writing manifest to image destination 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 podman[80514]: 2026-03-09 19:59:52.8853979 +0000 UTC m=+1.990046535 container create 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 podman[80514]: 2026-03-09 19:59:52.873891076 +0000 UTC m=+1.978539721 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 podman[80514]: 2026-03-09 19:59:52.919768682 +0000 UTC m=+2.024417327 container init 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 podman[80514]: 2026-03-09 19:59:52.922376693 +0000 UTC m=+2.027025328 container start 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 bash[80514]: 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.926Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.926Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.926Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" 
flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.926Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.926Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=arp 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-09T19:59:53.133 
INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=edac 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-09T19:59:53.133 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.927Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=os 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=stat 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 
level=info collector=textfile 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=time 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=uname 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a[80568]: ts=2026-03-09T19:59:52.928Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-09T19:59:53.134 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 19:59:52 vm01 systemd[1]: Started Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:53.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": dial tcp 192.168.123.101:8443: connect: connection refused; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=error ts=2026-03-09T19:59:53.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:53.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:53.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:53.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.106:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.106 because it doesn't contain any IP SANs" 2026-03-09T19:59:53.878 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=warn ts=2026-03-09T19:59:53.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.101:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.101 because it doesn't contain any IP SANs" 2026-03-09T19:59:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:53 vm06 
ceph-mon[48187]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T19:59:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:53 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:53 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:53 vm06 ceph-mon[48187]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:59:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:53 vm06 ceph-mon[48187]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[49698]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[49698]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[49698]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[53908]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[53908]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T19:59:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:53 vm01 ceph-mon[53908]: Deploying daemon alertmanager.a on vm01 2026-03-09T19:59:56.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:55 vm06 ceph-mon[48187]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T19:59:56.220 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:55 vm01 ceph-mon[49698]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: Stopping Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[75083]: level=info ts=2026-03-09T19:59:56.180Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 
2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80832]: 2026-03-09 19:59:56.191951326 +0000 UTC m=+0.025692653 container died cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80832]: 2026-03-09 19:59:56.209348474 +0000 UTC m=+0.043089801 container remove cf3634b7566553e2fd345cbc95c88e638798b2c400410974e4fe3acd9b6ea90b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80832]: 2026-03-09 19:59:56.210738887 +0000 UTC m=+0.044480203 volume remove de6c0e601f713dc9a6ae40700acd6248547aff6de45ef641c613c21e3309d80b 2026-03-09T19:59:56.221 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 bash[80832]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a 2026-03-09T19:59:56.221 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:55 vm01 ceph-mon[53908]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T19:59:56.595 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service: Deactivated successfully. 2026-03-09T19:59:56.596 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: Stopped Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:59:56.596 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service: Consumed 1.203s CPU time. 2026-03-09T19:59:56.596 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: Starting Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80941]: 2026-03-09 19:59:56.595915962 +0000 UTC m=+0.021678139 volume create bcede7e8dc799f1d66ff6521154c2879291df650876c967668b9f1a3029ef399 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80941]: 2026-03-09 19:59:56.599755057 +0000 UTC m=+0.025517234 container create 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80941]: 2026-03-09 19:59:56.632519653 +0000 UTC m=+0.058281830 container init 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80941]: 2026-03-09 19:59:56.636798871 +0000 UTC m=+0.062561048 container start 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 bash[80941]: 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 podman[80941]: 2026-03-09 19:59:56.588887198 +0000 UTC m=+0.014649385 image pull c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 systemd[1]: Started Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.659Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)" 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.659Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)" 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.661Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.101 port=9094 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.666Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-09T19:59:56.879 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.696Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:59:56.880 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.697Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T19:59:56.880 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.700Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093 2026-03-09T19:59:56.880 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:56.700Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9093 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:59:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.879 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:57.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:57 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:58.025 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:58.025 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:58.025 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 
2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:58.026 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:57 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T19:59:58.026 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:57 vm06 systemd[1]: Stopping Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 podman[69100]: 2026-03-09 19:59:58.025144884 +0000 UTC m=+0.030057188 container died 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 podman[69100]: 2026-03-09 19:59:58.047997867 +0000 UTC m=+0.052910161 container remove 65e409c65bb862973fc99460758043f8d297927e76ca41b358878143d6c11b9a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 bash[69100]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 systemd[1]: Stopped Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T19:59:58.342 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Consumed 1.426s CPU time. 2026-03-09T19:59:58.592 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 systemd[1]: Starting Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T19:59:58.592 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:58 vm06 bash[69208]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-09T19:59:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:58 vm06 ceph-mon[48187]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T19:59:59.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:58 vm06 ceph-mon[48187]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:59:59.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:58 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2851272740' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:59:59.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:58 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]: dispatch 2026-03-09T19:59:59.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 19:59:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T19:59:58.667Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000619501s 2026-03-09T19:59:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[49698]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T19:59:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[49698]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:59:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2851272740' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:59:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]: dispatch 2026-03-09T19:59:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[53908]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T19:59:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[53908]: Deploying daemon node-exporter.b on vm06 2026-03-09T19:59:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2851272740' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T19:59:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:58 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]: dispatch 2026-03-09T20:00:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:59 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]': finished 2026-03-09T20:00:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:59 vm06 ceph-mon[48187]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T20:00:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:59 vm06 ceph-mon[48187]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:00:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:59 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/623491142' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 19:59:59 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.092 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:59 vm06 bash[69208]: Getting image source signatures 2026-03-09T20:00:00.092 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:59 vm06 bash[69208]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-09T20:00:00.092 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:59 vm06 bash[69208]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-09T20:00:00.092 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 19:59:59 vm06 bash[69208]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-09T20:00:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]': finished 2026-03-09T20:00:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[49698]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T20:00:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[49698]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:00:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/623491142' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1159652315' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2587305647"}]': finished 2026-03-09T20:00:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[53908]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T20:00:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[53908]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:00:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/623491142' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 19:59:59 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]: dispatch 2026-03-09T20:00:00.860 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]': finished 2026-03-09T20:00:00.860 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T20:00:00.860 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: overall HEALTH_OK 2026-03-09T20:00:00.860 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2764238844' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch 2026-03-09T20:00:00.861 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 bash[69208]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 bash[69208]: Writing manifest to image destination 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 podman[69208]: 2026-03-09 20:00:00.625558465 +0000 UTC m=+2.186449831 container create f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 podman[69208]: 2026-03-09 20:00:00.655403945 +0000 UTC m=+2.216295311 container init f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 podman[69208]: 2026-03-09 20:00:00.658349519 +0000 UTC m=+2.219240885 container start f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T20:00:00.861 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 bash[69208]: f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c 2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 podman[69208]: 2026-03-09 20:00:00.61885087 +0000 UTC m=+2.179742246 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 systemd[1]: Started Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.675Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)"
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.675Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)"
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.676Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.676Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.677Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.677Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=arp
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=bcache
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=bonding
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=btrfs
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=conntrack
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=cpu
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=cpufreq
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=diskstats
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=dmi
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=edac
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=entropy
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=fibrechannel
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=filefd
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=filesystem
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=hwmon
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=infiniband
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=ipvs
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=loadavg
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=mdadm
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=meminfo
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=netclass
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=netdev
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=netstat
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=nfs
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.678Z caller=node_exporter.go:117 level=info collector=nfsd
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=nvme
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=os
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=powersupplyclass
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=pressure
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=rapl
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=schedstat
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=selinux
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=sockstat
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=softnet
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=stat
2026-03-09T20:00:00.862 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=tapestats
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=textfile
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=thermal_zone
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=time
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=udp_queues
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=uname
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=vmstat
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=xfs
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.679Z caller=node_exporter.go:117 level=info collector=zfs
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.680Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100
2026-03-09T20:00:00.863 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b[69262]: ts=2026-03-09T20:00:00.680Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100
2026-03-09T20:00:00.864 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:00:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[49909]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]': finished
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: osdmap e81: 8 total, 8 up, 8 in
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: overall HEALTH_OK
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2764238844' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3749176550"}]': finished
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: osdmap e81: 8 total, 8 up, 8 in
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: overall HEALTH_OK
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2764238844' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]: dispatch
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:00 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.207 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.207 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:00 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:01.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:01] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.33.4"
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]': finished
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: osdmap e82: 8 total, 8 up, 8 in
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: Deploying daemon prometheus.a on vm06
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:01 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]: dispatch
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]': finished
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: osdmap e82: 8 total, 8 up, 8 in
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: Deploying daemon prometheus.a on vm06
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]: dispatch
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3734821764"}]': finished
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: osdmap e82: 8 total, 8 up, 8 in
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: Deploying daemon prometheus.a on vm06
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:01 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]: dispatch
2026-03-09T20:00:02.592 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:02.314Z caller=manager.go:609 level=warn component="rule manager" group=pools msg="Evaluating rule failed" rule="alert: CephPoolGrowthWarning\nexpr: (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right()\n ceph_pool_metadata) >= 95\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.9.2\n severity: warning\n type: ceph_default\nannotations:\n description: |\n Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.\n summary: Pool growth rate may soon exceed it's capacity\n" err="found duplicate series for the match group {pool_id=\"1\"} on the left hand-side of the operation: [{instance=\"192.168.123.106:9283\", job=\"ceph\", pool_id=\"1\"}, {instance=\"192.168.123.101:9283\", job=\"ceph\", pool_id=\"1\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:02 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]': finished
2026-03-09T20:00:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:02 vm06 ceph-mon[48187]: osdmap e83: 8 total, 8 up, 8 in
2026-03-09T20:00:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:02 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3147038960' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:02 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]': finished
2026-03-09T20:00:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[49698]: osdmap e83: 8 total, 8 up, 8 in
2026-03-09T20:00:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3147038960' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/615686808' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/4186335858"}]': finished
2026-03-09T20:00:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[53908]: osdmap e83: 8 total, 8 up, 8 in
2026-03-09T20:00:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3147038960' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:02 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]: dispatch
2026-03-09T20:00:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:03 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]': finished
2026-03-09T20:00:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:03 vm06 ceph-mon[48187]: osdmap e84: 8 total, 8 up, 8 in
2026-03-09T20:00:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:03 vm06 ceph-mon[48187]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:03 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2910863871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:03 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]': finished
2026-03-09T20:00:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[49698]: osdmap e84: 8 total, 8 up, 8 in
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[49698]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2910863871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1068105247"}]': finished
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[53908]: osdmap e84: 8 total, 8 up, 8 in
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[53908]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2910863871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:04.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:03 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]: dispatch
2026-03-09T20:00:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:04 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]': finished
2026-03-09T20:00:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:04 vm06 ceph-mon[48187]: osdmap e85: 8 total, 8 up, 8 in
2026-03-09T20:00:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:04 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3424388028' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:04 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]': finished
2026-03-09T20:00:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[49698]: osdmap e85: 8 total, 8 up, 8 in
2026-03-09T20:00:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3424388028' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:05.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1068105247"}]': finished
2026-03-09T20:00:05.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[53908]: osdmap e85: 8 total, 8 up, 8 in
2026-03-09T20:00:05.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3424388028' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:05.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:04 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]: dispatch
2026-03-09T20:00:06.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:05 vm06 ceph-mon[48187]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:06.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:05 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]': finished
2026-03-09T20:00:06.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:05 vm06 ceph-mon[48187]: osdmap e86: 8 total, 8 up, 8 in
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[49698]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]': finished
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[49698]: osdmap e86: 8 total, 8 up, 8 in
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[53908]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3105792031"}]': finished
2026-03-09T20:00:06.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:05 vm01 ceph-mon[53908]: osdmap e86: 8 total, 8 up, 8 in
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 systemd[1]: Stopping Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:812 level=info msg="Stopping notify discovery manager..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:834 level=info msg="Stopping scrape manager..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:794 level=info msg="Scrape discovery manager stopped"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=main.go:808 level=info msg="Notify discovery manager stopped"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.818Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.819Z caller=main.go:828 level=info msg="Scrape manager stopped"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.820Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..."
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.821Z caller=main.go:1054 level=info msg="Notifier manager stopped"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[65249]: ts=2026-03-09T20:00:06.821Z caller=main.go:1066 level=info msg="See you next time!"
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 podman[69622]: 2026-03-09 20:00:06.83068689 +0000 UTC m=+0.030367989 container died 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 podman[69622]: 2026-03-09 20:00:06.8501189 +0000 UTC m=+0.049799989 container remove 6204e0dc9649e13f39e78ab2b561defb3e598893d81c9f3455630d6036ef4ec1 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:00:06.940 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 bash[69622]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a
2026-03-09T20:00:07.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:06.669Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.002990433s
2026-03-09T20:00:07.248 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Deactivated successfully.
2026-03-09T20:00:07.249 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 systemd[1]: Stopped Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:00:07.249 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:06 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Consumed 1.174s CPU time.
2026-03-09T20:00:07.249 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69732]: 2026-03-09 20:00:07.249371978 +0000 UTC m=+0.019784711 container create 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69732]: 2026-03-09 20:00:07.280213143 +0000 UTC m=+0.050625886 container init 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69732]: 2026-03-09 20:00:07.283055302 +0000 UTC m=+0.053468035 container start 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 bash[69732]: 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69732]: 2026-03-09 20:00:07.240596463 +0000 UTC m=+0.011009207 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.309Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.309Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.309Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.309Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.309Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.320Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.321Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.322Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.322Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.323Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.324Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=587.21µs
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.324Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.333Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.350Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.350Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.350Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=24.736µs wal_replay_duration=25.900717ms wbl_replay_duration=141ns total_replay_duration=26.629171ms
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.355Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.355Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.355Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.375Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=19.573346ms db_storage=922ns remote_storage=1.413µs web_handler=340ns query_engine=672ns scrape=896.829µs scrape_sd=96.982µs notify=9.428µs notify_sd=6.843µs rules=18.141615ms tracing=5.631µs
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.375Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-09T20:00:07.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:07.375Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-09T20:00:07.954 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:07 vm06 systemd[1]: Stopping Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[63166]: t=2026-03-09T20:00:07+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated"
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69867]: 2026-03-09 20:00:07.964754544 +0000 UTC m=+0.039469664 container died bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, io.openshift.expose-services=, build-date=2022-03-28T10:36:18.413762, vcs-type=git, description=Ceph Grafana Container, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 8, io.buildah.version=1.24.2, name=ubi8, com.redhat.component=ubi8-container, release=236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Grafana Container configured for Ceph mgr/dashboard integration, vendor=Red Hat, Inc., version=8.5)
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:07 vm06 podman[69867]: 2026-03-09 20:00:07.984038488 +0000 UTC m=+0.058753608 container remove bf76c513160b647dd882fafd782b807b5783039ce883639289b4352ead6e134c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, release=236.1648460182, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, distribution-scope=public, io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi8, version=8.5, vcs-type=git, maintainer=Paul Cuzner , io.openshift.tags=base rhel8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, description=Ceph Grafana Container, summary=Grafana Container configured for Ceph mgr/dashboard integration, vendor=Red Hat, Inc., build-date=2022-03-28T10:36:18.413762, com.redhat.component=ubi8-container)
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:07 vm06 bash[69867]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 bash[69884]: Error: no container with name or ID "ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana.a" found: no such container
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Deactivated successfully.
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 systemd[1]: Stopped Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Consumed 1.672s CPU time.
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 systemd[1]: Starting Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 podman[69923]: 2026-03-09 20:00:08.144863899 +0000 UTC m=+0.021049418 container create 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, io.openshift.expose-services=, maintainer=Paul Cuzner , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=Red Hat Universal Base Image 8, description=Ceph Grafana Container, distribution-scope=public, version=8.5, summary=Grafana Container configured for Ceph mgr/dashboard integration, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.tags=base rhel8, vcs-type=git, build-date=2022-03-28T10:36:18.413762, name=ubi8, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.component=ubi8-container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.buildah.version=1.24.2, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 podman[69923]: 2026-03-09 20:00:08.173696624 +0000 UTC m=+0.049882163 container init 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, com.redhat.component=ubi8-container, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.buildah.version=1.24.2, io.openshift.tags=base rhel8, release=236.1648460182, vcs-type=git, build-date=2022-03-28T10:36:18.413762, io.openshift.expose-services=, maintainer=Paul Cuzner , version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi8, vendor=Red Hat, Inc.)
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 podman[69923]: 2026-03-09 20:00:08.17918714 +0000 UTC m=+0.055372659 container start 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, distribution-scope=public, vcs-type=git, build-date=2022-03-28T10:36:18.413762, name=ubi8, architecture=x86_64, com.redhat.component=ubi8-container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.expose-services=, version=8.5, io.k8s.display-name=Red Hat Universal Base Image 8, release=236.1648460182, vendor=Red Hat, Inc., io.buildah.version=1.24.2, maintainer=Paul Cuzner , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Grafana Container configured for Ceph mgr/dashboard integration, description=Ceph Grafana Container, io.openshift.tags=base rhel8)
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 bash[69923]: 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 podman[69923]: 2026-03-09 20:00:08.136526234 +0000 UTC m=+0.012711773 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5
2026-03-09T20:00:08.234 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 systemd[1]: Started Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1014 B/s rd, 0 op/s
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: Reconfiguring grafana.a (dependencies changed)...
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: Reconfiguring daemon grafana.a on vm06
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:08 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STOPPING
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STOPPED
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STARTING
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Serving on http://:::9283
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STARTED
2026-03-09T20:00:08.593 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STOPPING
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." logger=settings
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana"
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana"
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins"
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning"
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="App mode production" logger=settings
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r-----
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Starting DB migrations" logger=migrator
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=445.725µs
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Created default organization" logger=sqlstore
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Initialising plugins" logger=plugin.manager
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel
2026-03-09T20:00:08.593 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket=
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="warming cache for startup" logger=ngalert
2026-03-09T20:00:08.594 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:00:08+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager
2026-03-09T20:00:08.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1014 B/s rd, 0 op/s
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: Reconfiguring grafana.a (dependencies changed)...
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: Reconfiguring daemon grafana.a on vm06
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1014 B/s rd, 0 op/s
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: Reconfiguring grafana.a (dependencies changed)...
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: Reconfiguring daemon grafana.a on vm06
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:08.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:08.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:08.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:08.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:08.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:08 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:09.037 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:00:09.037 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STOPPED
2026-03-09T20:00:09.037 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:08] ENGINE Bus STARTING
2026-03-09T20:00:09.037 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Serving on http://:::9283
2026-03-09T20:00:09.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Bus STARTED
2026-03-09T20:00:09.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Bus STOPPING
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:09.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:09.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:09 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Bus STOPPED
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Bus STARTING
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-09T20:00:09.640 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:09 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-09T20:00:10.092 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Serving on http://:::9283
2026-03-09T20:00:10.092 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:00:09] ENGINE Bus STARTED
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 850 B/s rd, 0 op/s
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 850 B/s rd, 0 op/s
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:00:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:10 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 850 B/s rd, 0 op/s
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:00:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:10 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:00:13.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:12 vm06 ceph-mon[48187]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T20:00:13.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:12 vm01 ceph-mon[49698]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T20:00:13.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:12 vm01 ceph-mon[53908]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s
2026-03-09T20:00:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:13 vm06 ceph-mon[48187]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T20:00:14.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:13 vm01 ceph-mon[49698]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T20:00:14.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:13 vm01 ceph-mon[53908]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-09T20:00:14.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:14] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0"
2026-03-09T20:00:16.320 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:15 vm01 ceph-mon[49698]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-09T20:00:16.320 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:16 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:16.321 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:16 vm01 ceph-mon[53908]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-09T20:00:16.321 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:16 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:16 vm06 ceph-mon[48187]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-09T20:00:16.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:16 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:18.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:18 vm06 ceph-mon[48187]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:18.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:18 vm01 ceph-mon[49698]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:18.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:18 vm01 ceph-mon[53908]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:19.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:19.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:19.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:20.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:20 vm06 ceph-mon[48187]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:20.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:20 vm01 ceph-mon[49698]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:20.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:20 vm01 ceph-mon[53908]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:22.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:22 vm06 ceph-mon[48187]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:22.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:22 vm01 ceph-mon[49698]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:22.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:22 vm01 ceph-mon[53908]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-09T20:00:24.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:23 vm06 ceph-mon[48187]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:24.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:24.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:24.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:23 vm01 ceph-mon[49698]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:24.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:23 vm01 ceph-mon[53908]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:24] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0"
2026-03-09T20:00:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:26 vm06 ceph-mon[48187]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:26.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:26 vm01 ceph-mon[49698]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:26.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:26 vm01 ceph-mon[53908]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:27.059 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:26 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:26.950Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:00:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:28 vm06 ceph-mon[48187]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:28.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:28 vm01 ceph-mon[49698]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:28.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:28 vm01 ceph-mon[53908]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:29.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:29.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:29.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:30.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:30 vm06 ceph-mon[48187]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:30.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:30 vm01 ceph-mon[49698]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:30.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:30 vm01 ceph-mon[53908]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:31.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:31 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:31 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:31.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:31 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:32 vm06 ceph-mon[48187]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:32.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:32 vm01 ceph-mon[49698]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:32.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:32 vm01 ceph-mon[53908]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:34.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:34.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:33 vm06 ceph-mon[48187]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:34.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:33 vm01 ceph-mon[49698]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:34.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:33 vm01 ceph-mon[53908]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:34] "GET /metrics HTTP/1.1" 200 37530 "" "Prometheus/2.51.0"
2026-03-09T20:00:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:36 vm06 ceph-mon[48187]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:36 vm01 ceph-mon[49698]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:36.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:36 vm01 ceph-mon[53908]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:37.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:36 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:36.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:00:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:36.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:00:38.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:38 vm06 ceph-mon[48187]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:38.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:38 vm01 ceph-mon[49698]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:38.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:38 vm01 ceph-mon[53908]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:39.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:39.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:39.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:40.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:40 vm06 ceph-mon[48187]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:40.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:40 vm01 ceph-mon[49698]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:40.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:40 vm01 ceph-mon[53908]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:42 vm06 ceph-mon[48187]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:42.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:42 vm01 ceph-mon[49698]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:42.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:42 vm01 ceph-mon[53908]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:44.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:43 vm06 ceph-mon[48187]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:44.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:44.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:43 vm01 ceph-mon[49698]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:44.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:43 vm01 ceph-mon[53908]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:44] "GET /metrics HTTP/1.1" 200 37530 "" "Prometheus/2.51.0"
2026-03-09T20:00:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:46 vm06 ceph-mon[48187]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:46.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:46 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:46.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:46 vm01 ceph-mon[49698]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:46.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:46 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:46.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:46 vm01 ceph-mon[53908]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:46.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:46 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:00:47.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T20:00:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:46.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:00:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:46.952Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:00:48.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:48 vm06 ceph-mon[48187]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:48.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:48 vm01 ceph-mon[49698]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:48.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:48 vm01 ceph-mon[53908]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:49 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:00:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:50 vm06 ceph-mon[48187]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:50 vm01 ceph-mon[49698]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:50.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:50 vm01 ceph-mon[53908]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:52.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:52 vm06 ceph-mon[48187]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:52.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:52 vm01 ceph-mon[49698]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:52.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:52 vm01 ceph-mon[53908]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:00:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:53 vm06 ceph-mon[48187]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:00:54.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:00:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:53 vm01 ceph-mon[49698]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:00:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:53 vm01 ceph-mon[53908]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:00:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:00:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:00:54] "GET /metrics HTTP/1.1" 200 37514 "" "Prometheus/2.51.0" 2026-03-09T20:00:56.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:56 vm06 ceph-mon[48187]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:00:56.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:56 vm01 ceph-mon[49698]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:00:56.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:56 vm01 ceph-mon[53908]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:00:57.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:00:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:00:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:00:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:56.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:00:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:00:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:00:56.952Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:00:58.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:58 vm06 ceph-mon[48187]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:00:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:58 vm01 ceph-mon[49698]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:00:58.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:58 vm01 ceph-mon[53908]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:00:59.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:00:59 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:00:59.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:00:59 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:00:59.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:00:59 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:00.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:00 vm06 ceph-mon[48187]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:00 vm01 ceph-mon[49698]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:00.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:00 vm01 ceph-mon[53908]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:01.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:01 vm06 ceph-mon[48187]: from='mgr.24686 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:01.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:01 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:01 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:02 vm06 ceph-mon[48187]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:02 vm01 ceph-mon[49698]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:02.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:02 vm01 ceph-mon[53908]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:04.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:04 vm06 ceph-mon[48187]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:04 vm01 ceph-mon[49698]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:04 vm01 ceph-mon[53908]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:01:04] "GET /metrics HTTP/1.1" 200 37522 "" "Prometheus/2.51.0" 2026-03-09T20:01:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:06 vm01 ceph-mon[49698]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:06.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:06 vm01 ceph-mon[53908]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:06 vm06 ceph-mon[48187]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:07.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match 
group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:06.952Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:06.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:08.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:08 vm01 ceph-mon[49698]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:08.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:08 vm01 ceph-mon[53908]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:08 vm06 ceph-mon[48187]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:09.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:09.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:09.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:10.373 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:10 vm01 ceph-mon[49698]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:10.373 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:10 vm01 ceph-mon[53908]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:10.436 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:10 vm06 ceph-mon[48187]: pgmap v52: 161 pgs: 161 
active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:01:12.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:12.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 
09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:01:12.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:01:12.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:01:14.464 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:14.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:14 vm06 ceph-mon[48187]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:14 vm01 ceph-mon[49698]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:14 vm01 ceph-mon[53908]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:01:14] "GET /metrics HTTP/1.1" 200 37522 "" "Prometheus/2.51.0" 2026-03-09T20:01:16.584 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:16 vm01 ceph-mon[49698]: pgmap v55: 161 
pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:16.585 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:16 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:16.585 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:16 vm01 ceph-mon[53908]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:16.585 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:16 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:16 vm06 ceph-mon[48187]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:16 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:17.297 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:16.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:16.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard 
integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:18 vm06 ceph-mon[48187]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:18.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:18 vm01 ceph-mon[49698]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:18.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:18 vm01 ceph-mon[53908]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:19.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:19.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:20.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:20 vm06 ceph-mon[48187]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:20 vm01 ceph-mon[49698]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:20.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:20 vm01 ceph-mon[53908]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:22.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:22 vm06 ceph-mon[48187]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:22.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:22 vm01 ceph-mon[49698]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:22.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:22 vm01 ceph-mon[53908]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:24.419 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:24.152Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:24.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:24 vm06 ceph-mon[48187]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:01:24] "GET /metrics HTTP/1.1" 200 37525 "" "Prometheus/2.51.0" 2026-03-09T20:01:24.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:24 vm01 ceph-mon[49698]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:24.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:24 vm01 ceph-mon[53908]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:26 vm06 ceph-mon[48187]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:26.860 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:26 vm01 ceph-mon[49698]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:26.860 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:26 vm01 ceph-mon[53908]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:27.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:26.955Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:27.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:26.956Z caller=notify.go:732 level=warn 
component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:27.335 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:26 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:28 vm06 ceph-mon[48187]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:28.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:28 vm01 ceph-mon[49698]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:28.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:28 vm01 ceph-mon[53908]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:30 vm06 ceph-mon[48187]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:30 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' 
cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:30 vm01 ceph-mon[49698]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:30 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:30 vm01 ceph-mon[53908]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:30 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:31 vm06 ceph-mon[48187]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:31 vm01 ceph-mon[49698]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:31.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:31 vm01 ceph-mon[53908]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:34.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:34.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:34 vm06 ceph-mon[48187]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:34.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:34 vm01 ceph-mon[49698]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:34.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:34 vm01 ceph-mon[53908]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:01:34] "GET /metrics HTTP/1.1" 200 37521 "" "Prometheus/2.51.0" 2026-03-09T20:01:36.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:36 vm06 ceph-mon[48187]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:36 vm01 ceph-mon[49698]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:36.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:36 vm01 ceph-mon[53908]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:37.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:36 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match 
group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:36.956Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:36.957Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:38 vm06 ceph-mon[48187]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:38.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:38 vm01 ceph-mon[49698]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:38.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:38 vm01 ceph-mon[53908]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:40 vm06 ceph-mon[48187]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:40 vm01 ceph-mon[49698]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:40 vm01 ceph-mon[53908]: pgmap v67: 161 pgs: 161 
active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:42 vm06 ceph-mon[48187]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:42 vm01 ceph-mon[49698]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:42 vm01 ceph-mon[53908]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:44.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:44.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:44 vm06 ceph-mon[48187]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:44 vm01 ceph-mon[49698]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:44 vm01 ceph-mon[53908]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 
- - [09/Mar/2026:20:01:44] "GET /metrics HTTP/1.1" 200 37521 "" "Prometheus/2.51.0" 2026-03-09T20:01:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:46 vm06 ceph-mon[48187]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:46.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:46 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:46.608 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:46 vm01 ceph-mon[49698]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:46.608 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:46 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:46.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:46 vm01 ceph-mon[53908]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:46.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:46 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:01:47.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:46.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:47.378 
INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:46.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:48.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:48 vm06 ceph-mon[48187]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:48.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:48 vm01 ceph-mon[49698]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:48 vm01 ceph-mon[53908]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:49 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:49.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:50 vm01 ceph-mon[49698]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:50 vm01 ceph-mon[53908]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:50 vm06 ceph-mon[48187]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:51 vm01 ceph-mon[49698]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:51 vm01 ceph-mon[53908]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:51 vm06 ceph-mon[48187]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:54.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 
1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:54.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:54 vm06 ceph-mon[48187]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:54.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:54 vm01 ceph-mon[49698]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:54.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:54 vm01 ceph-mon[53908]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:01:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:01:54] "GET /metrics HTTP/1.1" 200 37524 "" "Prometheus/2.51.0" 2026-03-09T20:01:56.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:56 vm06 ceph-mon[48187]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:56.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:56 vm01 ceph-mon[49698]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:56.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:56 vm01 ceph-mon[53908]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:01:57.231 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:56.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such 
host" 2026-03-09T20:01:57.231 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:01:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:01:56.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:01:57.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:01:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:01:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:01:58.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:58 vm06 ceph-mon[48187]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:58 vm01 ceph-mon[49698]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:58.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:58 vm01 ceph-mon[53908]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:01:59.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:01:59 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:59.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:01:59 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:01:59.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:01:59 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:00.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:00 vm06 ceph-mon[48187]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 
MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:00 vm01 ceph-mon[49698]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:00.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:00 vm01 ceph-mon[53908]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:01.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:01 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:01.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:01 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:01 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:02 vm06 ceph-mon[48187]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:02.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:02 vm01 ceph-mon[49698]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:02 vm01 ceph-mon[53908]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:04.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:04 vm06 ceph-mon[48187]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:04 vm01 ceph-mon[49698]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:04 vm01 ceph-mon[53908]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:02:04] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-09T20:02:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:06 vm01 ceph-mon[49698]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:06.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:06 vm01 ceph-mon[53908]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:06 vm06 ceph-mon[48187]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:07.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match 
group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:06.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:06.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:08.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:08 vm01 ceph-mon[49698]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:08.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:08 vm01 ceph-mon[53908]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:08 vm06 ceph-mon[48187]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:09.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:09.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:09.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:10.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:10 vm01 ceph-mon[49698]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:10.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:10 vm01 ceph-mon[53908]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:10.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:10 vm06 ceph-mon[48187]: pgmap v82: 161 pgs: 161 
active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:12.430 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:12 vm06 ceph-mon[48187]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:12.430 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.430 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.430 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.430 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:12 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[49698]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:12.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.467 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[53908]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:12.467 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.467 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.467 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:12.467 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:12 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:14.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:14.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:14.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:14.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:14 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: 
from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:14.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:14 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:02:14] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-09T20:02:16.432 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:16 vm01 ceph-mon[49698]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:16.432 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:16 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:16.432 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:16 vm01 ceph-mon[53908]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:16.432 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:16 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:16 vm06 ceph-mon[48187]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:16 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:17.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: 
CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:16.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:16.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:18.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:18 vm06 ceph-mon[48187]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:18.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:18 vm01 ceph-mon[49698]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:18.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:18 vm01 ceph-mon[53908]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:19.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:19.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:20.592 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:20 vm06 ceph-mon[48187]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:20 vm01 ceph-mon[49698]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:20.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:20 vm01 ceph-mon[53908]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:22.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:22 vm06 ceph-mon[48187]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:22.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:22 vm01 ceph-mon[49698]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:22.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:22 vm01 ceph-mon[53908]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:24.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:24 vm06 ceph-mon[48187]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:24.464 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:24 vm01 ceph-mon[49698]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:24 vm01 ceph-mon[53908]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:02:24] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:02:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:26 vm06 ceph-mon[48187]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:26 vm01 ceph-mon[49698]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:26 vm01 ceph-mon[53908]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:27.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:26 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:26.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:26.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:28 vm06 ceph-mon[48187]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:28.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:28 vm01 ceph-mon[49698]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:28.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:28 vm01 ceph-mon[53908]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:30 vm06 ceph-mon[48187]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:30 vm01 ceph-mon[49698]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:30 vm01 ceph-mon[53908]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:31 vm06 ceph-mon[48187]: from='mgr.24686 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:31 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:31 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:32.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:32 vm06 ceph-mon[48187]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:32.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:32 vm01 ceph-mon[49698]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:32.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:32 vm01 ceph-mon[53908]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:34.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:34.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:34 vm06 ceph-mon[48187]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:34 vm01 ceph-mon[49698]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:34 vm01 ceph-mon[53908]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:02:34] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-09T20:02:36.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:36 vm06 ceph-mon[48187]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:36 vm01 ceph-mon[49698]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:36 vm01 ceph-mon[53908]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:37.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:36 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match 
group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:36.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:36.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:38 vm06 ceph-mon[48187]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:38.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:38 vm01 ceph-mon[49698]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:38 vm01 ceph-mon[53908]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:40 vm06 ceph-mon[48187]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:40 vm01 ceph-mon[49698]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:40 vm01 ceph-mon[53908]: pgmap v97: 161 pgs: 161 
active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:42 vm06 ceph-mon[48187]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:42 vm01 ceph-mon[49698]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:42 vm01 ceph-mon[53908]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:44.464 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:44.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:44.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:44 vm06 ceph-mon[48187]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:44 vm01 ceph-mon[49698]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:44 vm01 ceph-mon[53908]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 
- - [09/Mar/2026:20:02:44] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-09T20:02:45.467 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-09T20:02:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:02:46.348 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to redeploy mgr.y on host 'vm01' 2026-03-09T20:02:46.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:46.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3057445610' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T20:02:46.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/3057445610' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:46 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:46.409 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh' 2026-03-09T20:02:46.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:46.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3057445610' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T20:02:46.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.744 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:46.744 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:46 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (2m) 33s ago 9m 23.7M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (2m) 33s ago 9m 47.7M - dad864ee21e9 694e845e2157 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 33s ago 9m 45.6M - 3.5 e1d6a67b021e 5e886c64eb22 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (5m) 33s ago 11m 558M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:9283 running (11m) 33s ago 11m 419M - 17.2.0 e1d6a67b021e 4f3dd125b57b 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (11m) 33s ago 11m 62.9M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (11m) 33s ago 11m 48.0M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (11m) 33s ago 11m 44.9M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:02:47.029 
INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (2m) 33s ago 9m 9495k - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (2m) 33s ago 9m 9470k - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (10m) 33s ago 10m 54.0M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (10m) 33s ago 10m 53.0M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (10m) 33s ago 10m 49.2M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (10m) 33s ago 10m 51.7M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (10m) 33s ago 10m 53.5M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (10m) 33s ago 10m 50.9M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (10m) 33s ago 10m 47.9M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (9m) 33s ago 9m 51.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 33s ago 9m 50.3M - 2.51.0 1d3b7f56885b 1ff5c193e113 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (9m) 33s ago 9m 91.5M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:02:47.029 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (9m) 33s ago 9m 91.4M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:02:47.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 
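The redeploy command issued above picks the standby mgr by parsing `ceph mgr dump` and hands its name to `ceph orch daemon redeploy`, which is why the `orch ps` listing still shows mgr.y on 17.2.0 while mgr.x already runs the CI build. A minimal sketch of that selection, assuming a single standby and that jq is available inside the cephadm shell (as in the logged command), is:

    # identify the current standby mgr (mgr.y in the log above) and redeploy it on the CI image
    standby=$(ceph mgr dump -f json | jq -r '.standbys[0].name')
    ceph orch daemon redeploy "mgr.${standby}" --image "quay.ceph.io/ceph-ci/ceph:${sha1}"
    # the orchestrator acknowledges with: Scheduled to redeploy mgr.<name> on host '<host>'

Here `${sha1}` is the build SHA exported into the shell via `-e sha1=...` as shown in the cephadm invocation; redeploying the standby first keeps the active mgr (mgr.x) serving the orchestrator while mgr.y is restarted on the new image, which is the point of the staggered-upgrade step this job exercises.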
2026-03-09T20:02:47.113 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T20:02:47.240 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:46.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:47.240 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:46.965Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: Schedule redeploy daemon mgr.y 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: Schedule 
redeploy daemon mgr.y 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:02:47.499 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:47 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: Schedule redeploy daemon mgr.y 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:02:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:47 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:02:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:48 vm06 ceph-mon[48187]: Deploying daemon mgr.y on vm01 2026-03-09T20:02:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:48 vm06 ceph-mon[48187]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:48.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:48 vm06 ceph-mon[48187]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:48.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[49698]: Deploying daemon mgr.y on vm01 2026-03-09T20:02:48.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[49698]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:48.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[49698]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:48.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[53908]: Deploying daemon mgr.y on vm01 2026-03-09T20:02:48.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[53908]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:48.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:48 vm01 ceph-mon[53908]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:02:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:49 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:49.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:49.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:50 vm06 ceph-mon[48187]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:50 vm01 ceph-mon[49698]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:50 vm01 ceph-mon[53908]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:52 vm06 ceph-mon[48187]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:52 vm01 ceph-mon[53908]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:52 vm01 ceph-mon[49698]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:54.464 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:54.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:54 vm06 ceph-mon[48187]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:02:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:02:54] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:02:55.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:54 vm01 ceph-mon[49698]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:55.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:54 vm01 ceph-mon[53908]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:56.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:55 vm06 ceph-mon[48187]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:56.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:55 vm01 ceph-mon[49698]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:56.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:55 vm01 ceph-mon[53908]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:02:57.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:02:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:02:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 
component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:02:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:56.965Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:02:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:02:56.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:02:58.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:58 vm01 ceph-mon[49698]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:58.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:58 vm01 ceph-mon[53908]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:58.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:58 vm06 ceph-mon[48187]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:02:59.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:02:59 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:59.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:02:59 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:02:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:02:59 vm06 ceph-mon[48187]: from='client.14907 -' 
entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:00 vm01 ceph-mon[49698]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:00.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:00 vm01 ceph-mon[53908]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:00.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:00 vm06 ceph-mon[48187]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:01 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:01.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:01 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:01.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:01 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:02.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:02 vm06 ceph-mon[48187]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:02.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:02 vm01 ceph-mon[53908]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:02.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:02 vm01 ceph-mon[49698]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:04.464 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:04.464 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:04 vm06 ceph-mon[48187]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:04 vm01 ceph-mon[49698]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:04.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:04 vm01 ceph-mon[53908]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:04] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:03:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:06 vm01 ceph-mon[53908]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:06 vm01 ceph-mon[49698]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:06 vm06 ceph-mon[48187]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:07.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the 
match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:06.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:06.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:08.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:08 vm01 ceph-mon[49698]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:08.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:08 vm01 ceph-mon[53908]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:08 vm06 ceph-mon[48187]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:10 vm06 ceph-mon[48187]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:10 vm01 ceph-mon[49698]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:10 vm01 ceph-mon[53908]: pgmap v112: 161 pgs: 
161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:11 vm06 ceph-mon[48187]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:11 vm01 ceph-mon[49698]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:11 vm01 ceph-mon[53908]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:14.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:14.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:14.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:14 vm06 ceph-mon[48187]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:14] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:03:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:14 vm01 ceph-mon[49698]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:14.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:14 vm01 ceph-mon[53908]: pgmap v114: 161 
pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:15 vm06 ceph-mon[48187]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:15 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:15 vm01 ceph-mon[49698]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:15 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:15 vm01 ceph-mon[53908]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:15 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:17.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:16.967Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:17.378 
INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:16.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:18.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:18 vm01 ceph-mon[49698]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:18.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:18 vm01 ceph-mon[53908]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:18.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:18 vm06 ceph-mon[48187]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:19.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:19.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:20.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:20 vm06 ceph-mon[48187]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:20 vm01 ceph-mon[49698]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:20.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:20 vm01 ceph-mon[53908]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:22.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:22 vm06 ceph-mon[48187]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:22.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:22 vm01 ceph-mon[53908]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:22.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:22 vm01 ceph-mon[49698]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:24.415 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 
1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:24.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:24 vm06 ceph-mon[48187]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:24] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:03:24.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:24 vm01 ceph-mon[49698]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:24.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:24 vm01 ceph-mon[53908]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:26 vm06 ceph-mon[48187]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:26 vm01 ceph-mon[53908]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:26 vm01 ceph-mon[49698]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:27.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:26 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on 
(instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:26.968Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:26.969Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:28 vm06 ceph-mon[48187]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:28.875 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:28 vm01 ceph-mon[49698]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:28.875 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:28 vm01 ceph-mon[53908]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:31.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:30 vm06 ceph-mon[48187]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB 
data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:31.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:30 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:31.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:30 vm01 ceph-mon[53908]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:31.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:30 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:30 vm01 ceph-mon[49698]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:30 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:31 vm06 ceph-mon[48187]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:32.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:31 vm01 ceph-mon[49698]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:32.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:31 vm01 ceph-mon[53908]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:34.462 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:34.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:34 vm06 ceph-mon[48187]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:34 vm01 ceph-mon[49698]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:34 vm01 ceph-mon[53908]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:34] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T20:03:35.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:35 vm01 ceph-mon[49698]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:35.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:35 vm01 ceph-mon[53908]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:36.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:35 vm06 ceph-mon[48187]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:36.292 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: Stopping Ceph mgr.y for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83441]: 2026-03-09 20:03:36.292551381 +0000 UTC m=+0.082063788 container died 4f3dd125b57b16cd5af684bb92e42a8accaca3a18bc05ef5ebdbd81318e0440f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, ceph=True, maintainer=Guillaume Abrioux , distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, name=centos-stream, vcs-type=git, io.openshift.expose-services=, vendor=Red Hat, Inc., version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, release=754, GIT_CLEAN=True, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83441]: 2026-03-09 20:03:36.40067087 +0000 UTC m=+0.190183277 container remove 4f3dd125b57b16cd5af684bb92e42a8accaca3a18bc05ef5ebdbd81318e0440f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, release=754, CEPH_POINT_RELEASE=-17.2.0, ceph=True, RELEASE=HEAD, architecture=x86_64, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, name=centos-stream, com.redhat.component=centos-stream-container, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, version=8, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream) 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 bash[83441]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service: Failed with result 'exit-code'. 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: Stopped Ceph mgr.y for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:03:36.558 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service: Consumed 36.559s CPU time. 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: Starting Ceph mgr.y for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83551]: 2026-03-09 20:03:36.799500043 +0000 UTC m=+0.020582059 container create 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83551]: 2026-03-09 20:03:36.841319557 +0000 UTC m=+0.062401573 container init 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83551]: 2026-03-09 20:03:36.845689775 +0000 UTC m=+0.066771791 
container start 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 bash[83551]: 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 podman[83551]: 2026-03-09 20:03:36.791035522 +0000 UTC m=+0.012117539 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:03:36.969 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 systemd[1]: Started Ceph mgr.y for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:03:37.308 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:36.968+0000 7f7614e69140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T20:03:37.308 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:37.021+0000 7f7614e69140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T20:03:37.315 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:36.969Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:37.316 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:36.976Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:37.319 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:36 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: 
warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:37.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:37.551+0000 7f7614e69140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[49698]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.024 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:37 vm01 ceph-mon[53908]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:38.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:37 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:37 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:37 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:37 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:38.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:37 vm06 ceph-mon[48187]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:38.275 
INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:38.125+0000 7f7614e69140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T20:03:38.276 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T20:03:38.276 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T20:03:38.276 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: from numpy import show_config as show_numpy_config 2026-03-09T20:03:38.552 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:38.274+0000 7f7614e69140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T20:03:38.552 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:38.351+0000 7f7614e69140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T20:03:38.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:38.547+0000 7f7614e69140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T20:03:39.284 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:39.284 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.284 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.323+0000 7f7614e69140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.461+0000 7f7614e69140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.509+0000 7f7614e69140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.556+0000 7f7614e69140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:39 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:39.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:39 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.610 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:39 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:39.838 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.615+0000 7f7614e69140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T20:03:39.838 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.656+0000 7f7614e69140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:03:40.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.891+0000 7f7614e69140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:03:40.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:39 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:39.947+0000 7f7614e69140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:03:40.511 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.202+0000 7f7614e69140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:03:40.825 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.511+0000 7f7614e69140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:03:40.825 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.562+0000 7f7614e69140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:03:40.825 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.608+0000 7f7614e69140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:03:40.825 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.694+0000 7f7614e69140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:03:40.825 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.736+0000 7f7614e69140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:03:41.124 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.825+0000 7f7614e69140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:03:41.124 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 
20:03:40 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:40.958+0000 7f7614e69140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:03:41.125 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:40 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:03:41.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:40 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 
2026-03-09T20:03:41.125+0000 7f7614e69140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:41.166+0000 7f7614e69140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:03:41] ENGINE Bus STARTING 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: CherryPy Checker: 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: The Application mounted at '' has an empty config. 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:03:41] ENGINE Serving on http://:::9283 2026-03-09T20:03:41.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:03:41 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:03:41] ENGINE Bus STARTED 2026-03-09T20:03:42.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:42.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: Standby manager daemon y restarted 2026-03-09T20:03:42.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: Standby manager daemon y started 2026-03-09T20:03:42.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: Standby manager daemon y restarted 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: Standby manager daemon y started 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: from='mgr.? 
192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T20:03:42.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:41 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 74 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 48 op/s 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: Standby manager daemon y restarted 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: Standby manager daemon y started 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T20:03:42.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:41 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:03:43.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:42 vm01 ceph-mon[49698]: mgrmap e27: x(active, since 3m), standbys: y 2026-03-09T20:03:43.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:42 vm01 ceph-mon[53908]: mgrmap e27: x(active, since 3m), standbys: y 2026-03-09T20:03:43.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:42 vm06 ceph-mon[48187]: mgrmap e27: x(active, since 3m), standbys: y 2026-03-09T20:03:44.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:44.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:44 vm06 ceph-mon[48187]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:44 vm01 ceph-mon[49698]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:44 vm01 ceph-mon[53908]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:44] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T20:03:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:46 vm06 ceph-mon[48187]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:03:46.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:46 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:46.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:46 vm01 ceph-mon[49698]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:03:46.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:46 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:03:46.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:46 vm01 ceph-mon[53908]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:03:46.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:46 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": 
"json"}]: dispatch 2026-03-09T20:03:47.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:46.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:46.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:48.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:48 vm06 ceph-mon[48187]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:48.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:48 vm01 ceph-mon[53908]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:48.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:48 vm01 ceph-mon[49698]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:03:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:49 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:49.628 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:49.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:50 vm06 ceph-mon[48187]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:50 vm01 ceph-mon[49698]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:50.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:50 vm01 ceph-mon[53908]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:52 vm06 ceph-mon[48187]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:52 vm01 ceph-mon[49698]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:52 vm01 ceph-mon[53908]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:54.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:54.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:54 vm06 ceph-mon[48187]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:03:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:03:54] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0" 2026-03-09T20:03:54.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:54 vm01 ceph-mon[49698]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:54 vm01 ceph-mon[53908]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 44 KiB/s rd, 0 B/s wr, 72 op/s 2026-03-09T20:03:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:55 vm06 ceph-mon[48187]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:55.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:55 vm01 ceph-mon[49698]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:55.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:55 vm01 ceph-mon[53908]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:03:57.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:03:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:03:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" 
err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:03:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:56.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:03:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:03:56.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:03:58.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:58 vm06 ceph-mon[48187]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:58 vm01 ceph-mon[49698]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:58.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:58 vm01 ceph-mon[53908]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:03:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:03:59 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:59.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:03:59 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:03:59.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:03:59 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:00.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:00 vm06 ceph-mon[48187]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:00.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:00 vm01 ceph-mon[49698]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:00.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:00 vm01 
ceph-mon[53908]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:01.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:01 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:01.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:01 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:01.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:01 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:02.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:02 vm06 ceph-mon[48187]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:02.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:02 vm01 ceph-mon[49698]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:02.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:02 vm01 ceph-mon[53908]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:04.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:04 vm06 ceph-mon[48187]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:04.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:04 vm01 ceph-mon[49698]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:04.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:04 vm01 ceph-mon[53908]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:04] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:04:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:06 vm06 ceph-mon[48187]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:06 vm01 ceph-mon[49698]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:06 vm01 ceph-mon[53908]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:07.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:06 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:04:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:04:06.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:04:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:04:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:04:06.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:04:08.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:08 vm06 ceph-mon[48187]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:08.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:08 vm01 ceph-mon[49698]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:08.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:08 vm01 ceph-mon[53908]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:09.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:09.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:09.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:10.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:10 vm01 ceph-mon[49698]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:10.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:10 vm01 ceph-mon[53908]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:10 vm06 ceph-mon[48187]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:12.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:12 vm01 ceph-mon[49698]: pgmap v143: 161 pgs: 
161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:12.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:12 vm01 ceph-mon[53908]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:12 vm06 ceph-mon[48187]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:14.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:14 vm06 ceph-mon[48187]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:14.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:14 vm01 ceph-mon[49698]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:14 vm01 ceph-mon[53908]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:14] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:04:15.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:15 vm01 ceph-mon[49698]: from='mgr.24686 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:15.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:15 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:15 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:16 vm06 ceph-mon[48187]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:16 vm01 ceph-mon[49698]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:16 vm01 ceph-mon[53908]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:17.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:04:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:04:16.975Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:04:18.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:17 vm06 ceph-mon[48187]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:18.378 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:17 vm01 ceph-mon[49698]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:18.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:17 vm01 ceph-mon[53908]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:19.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:19.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:20 vm06 ceph-mon[48187]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:20 vm01 ceph-mon[49698]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:20 vm01 ceph-mon[53908]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:22 vm06 ceph-mon[48187]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:22 vm01 ceph-mon[49698]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:22 vm01 ceph-mon[53908]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:24.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:24.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:24.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:24 vm06 ceph-mon[48187]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:24 vm01 ceph-mon[49698]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:24 vm01 ceph-mon[53908]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:24] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-09T20:04:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:26 vm06 ceph-mon[48187]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:26 vm01 ceph-mon[49698]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:26 vm01 ceph-mon[53908]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:27.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:26 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the 
match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:28.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:28 vm06 ceph-mon[48187]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:28.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:28 vm01 ceph-mon[49698]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:28.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:28 vm01 ceph-mon[53908]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:30 vm06 ceph-mon[48187]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:30 vm01 ceph-mon[49698]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:30 vm01 ceph-mon[53908]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:31 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:31 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:31 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:32.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:32 vm06 ceph-mon[48187]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:32.628 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:32 vm01 ceph-mon[49698]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:32.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:32 vm01 ceph-mon[53908]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:34.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:34 vm06 ceph-mon[48187]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:34.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:34 vm01 ceph-mon[49698]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:34 vm01 ceph-mon[53908]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:34] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:04:36.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:36 vm06 ceph-mon[48187]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
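The two Prometheus rule-evaluation warnings above repeat roughly every ten seconds for the remainder of this excerpt: CephOSDFlapping and CephNodeDiskspaceWarning both fail with "found duplicate series ... many-to-many matching not allowed". The error bodies show why: ceph_osd_metadata is scraped twice per OSD (once with instance="ceph_cluster", once with instance="192.168.123.106:9283"), and node_uname_info appears twice per host, so the group_left joins in the shipped expressions have a non-unique right-hand side. Below is a minimal sketch of how those joins could be disambiguated, assuming the duplicate series are otherwise identical; this is an illustrative rewrite, not something applied in this run.

# CephOSDFlapping (sketch): collapse duplicate ceph_osd_metadata series
# across scrape instances before the one-to-one join on ceph_daemon.
(
  rate(ceph_osd_up[5m])
    * on (ceph_daemon) group_left (hostname)
      max by (ceph_daemon, hostname) (ceph_osd_metadata)
) * 60 > 1

# CephNodeDiskspaceWarning (sketch): same idea, making node_uname_info
# unique per instance so the on (instance) join has one series per host.
predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
  * on (instance) group_left (nodename)
    max by (instance, nodename) (node_uname_info)
< 0

Both metadata metrics are info-style series whose value is 1, so aggregating with max by (...) preserves the join semantics while guaranteeing at most one series per match group.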
2026-03-09T20:04:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:36 vm01 ceph-mon[49698]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:36 vm01 ceph-mon[53908]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:37.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:36 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:38 vm06 ceph-mon[48187]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:38.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:38 vm01 ceph-mon[49698]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:38 vm01 ceph-mon[53908]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:40.607 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:40 vm01 ceph-mon[53908]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:40.608 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:40 vm01 ceph-mon[49698]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:40 vm06 ceph-mon[48187]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:04:42.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:42 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:04:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:42 vm06 ceph-mon[48187]: 
from='mgr.24686 ' entity='mgr.x' 2026-03-09T20:04:44.463 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:44.463 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:44 vm06 ceph-mon[48187]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:44 vm01 ceph-mon[49698]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:44 vm01 ceph-mon[53908]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:44] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:04:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:45 vm06 ceph-mon[48187]: from='mgr.24686 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:04:46.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:46 vm01 ceph-mon[49698]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:46.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:46 vm01 ceph-mon[53908]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:46.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:46 vm06 ceph-mon[48187]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:47.341 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:46 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:48.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:48 vm01 ceph-mon[49698]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:48.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:48 vm01 ceph-mon[53908]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:48 vm06 ceph-mon[48187]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:49.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:49.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:49.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:49 vm06 ceph-mon[48187]: from='client.14907 -' 
entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:50 vm01 ceph-mon[49698]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:50.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:50 vm01 ceph-mon[53908]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:50.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:50 vm06 ceph-mon[48187]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:52 vm06 ceph-mon[48187]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:52 vm01 ceph-mon[49698]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:52 vm01 ceph-mon[53908]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:54.415 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.101\", device_class=\"hdd\", hostname=\"vm01\", instance=\"192.168.123.106:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.101\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:54 vm06 ceph-mon[48187]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:04:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:04:54] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:04:54.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:54 vm01 ceph-mon[49698]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:54 vm01 ceph-mon[53908]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:56 vm06 ceph-mon[48187]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:56 vm01 ceph-mon[49698]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:56.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:56 vm01 ceph-mon[53908]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:04:57.342 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:04:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:04:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the 
match group {instance=\"vm01\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"0d36e446-1bf1-11f1-a427-9918c6d73ee3\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm01\", job=\"node\", machine=\"x86_64\", nodename=\"vm01\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T20:04:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:58 vm06 ceph-mon[48187]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:58.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:58 vm01 ceph-mon[49698]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:58.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:58 vm01 ceph-mon[53908]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:04:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:04:59 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:04:59 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:04:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:04:59 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:05:00.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:00 vm06 ceph-mon[48187]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:00.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:00 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:00.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:00 vm01 ceph-mon[49698]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:00.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:00 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:00.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:00 vm01 ceph-mon[53908]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:00.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:00 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:02 vm06 ceph-mon[48187]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:02.878 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:02 vm01 ceph-mon[49698]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:02 vm01 ceph-mon[53908]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:04.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:04 vm06 ceph-mon[48187]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:05:04] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T20:05:04.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:04 vm01 ceph-mon[49698]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:04.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:04 vm01 ceph-mon[53908]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:06 vm06 ceph-mon[48187]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:06.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:06 vm01 ceph-mon[49698]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:06.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:06 vm01 ceph-mon[53908]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:06.949Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:05:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:08 vm06 ceph-mon[48187]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:08.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:08 vm01 ceph-mon[49698]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:08.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:08 vm01 ceph-mon[53908]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:09 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:05:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:09 vm06 ceph-mon[48187]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 
09 20:05:09 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:05:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:09 vm01 ceph-mon[49698]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:09 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:05:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:09 vm01 ceph-mon[53908]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:12.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:12 vm06 ceph-mon[48187]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:12.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:12 vm01 ceph-mon[49698]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:12.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:12 vm01 ceph-mon[53908]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:14.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:14 vm06 ceph-mon[48187]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:14.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:05:14] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T20:05:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:14 vm01 ceph-mon[49698]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:14 vm01 ceph-mon[53908]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:05:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:16 vm06 ceph-mon[48187]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:16 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:16.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:16 vm01 ceph-mon[49698]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:16.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:16 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:16 vm01 ceph-mon[53908]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:05:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:16 vm01 ceph-mon[53908]: from='mgr.24686 
192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:16.949Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:16.950Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:18 vm06 ceph-mon[48187]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:18.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:18 vm01 ceph-mon[49698]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:18.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:18 vm01 ceph-mon[53908]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:19 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:19.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:19 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:19.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:19 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:20.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:20 vm06 ceph-mon[48187]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:20 vm01 ceph-mon[49698]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:20.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:20 vm01 ceph-mon[53908]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:22.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:22 vm06 ceph-mon[48187]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:22.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:22 vm01 ceph-mon[49698]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:22.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:22 vm01 ceph-mon[53908]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:24 vm06 ceph-mon[48187]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:24.592 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:05:24] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0"
2026-03-09T20:05:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:24 vm01 ceph-mon[49698]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:24 vm01 ceph-mon[53908]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:26 vm06 ceph-mon[48187]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:26 vm01 ceph-mon[49698]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:26 vm01 ceph-mon[53908]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:26.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:26.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:28.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:28 vm06 ceph-mon[48187]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:28.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:28 vm01 ceph-mon[49698]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:28.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:28 vm01 ceph-mon[53908]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:29 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:29 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:29 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:30 vm01 ceph-mon[49698]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:30.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:30 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:30 vm01 ceph-mon[53908]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:30.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:30 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:30.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:30 vm06 ceph-mon[48187]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:30 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:32.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:32 vm01 ceph-mon[49698]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:32.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:32 vm01 ceph-mon[53908]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:32 vm06 ceph-mon[48187]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:34 vm01 ceph-mon[49698]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:34 vm01 ceph-mon[53908]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:34.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:34 vm06 ceph-mon[48187]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:05:34] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0"
2026-03-09T20:05:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:36 vm01 ceph-mon[49698]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:36 vm01 ceph-mon[53908]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:36.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:36 vm06 ceph-mon[48187]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:36.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:36.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:38.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:38 vm01 ceph-mon[49698]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:38 vm01 ceph-mon[53908]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:38.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:38 vm06 ceph-mon[48187]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:39 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:39 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:39 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:40 vm01 ceph-mon[49698]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:40 vm01 ceph-mon[53908]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:40 vm06 ceph-mon[48187]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:42 vm06 ceph-mon[48187]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:42 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:42 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:42 vm06 ceph-mon[48187]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[49698]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[49698]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[53908]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:42 vm01 ceph-mon[53908]: from='mgr.24686 ' entity='mgr.x'
2026-03-09T20:05:44.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:44 vm06 ceph-mon[48187]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:05:44] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0"
2026-03-09T20:05:44.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:44 vm01 ceph-mon[49698]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:44.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:44 vm01 ceph-mon[53908]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:45 vm06 ceph-mon[48187]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:45 vm01 ceph-mon[49698]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:45 vm01 ceph-mon[53908]: from='mgr.24686 192.168.123.106:0/1915791181' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:46.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:46 vm06 ceph-mon[48187]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:46.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:46 vm01 ceph-mon[49698]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:46.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:46 vm01 ceph-mon[53908]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:46.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:46.952Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:47.586 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (5m) 66s ago 12m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (5m) 2m ago 12m 47.9M - dad864ee21e9 694e845e2157
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (5m) 66s ago 12m 48.4M - 3.5 e1d6a67b021e 5e886c64eb22
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (8m) 2m ago 14m 560M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (2m) 66s ago 14m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (14m) 66s ago 14m 63.0M 2048M 17.2.0 e1d6a67b021e dac2494fb686
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (14m) 2m ago 14m 45.4M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (14m) 66s ago 14m 45.4M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (5m) 66s ago 12m 10.0M - 1.7.0 72c9c2088986 6e21ac8930a5
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (5m) 2m ago 12m 9.77M - 1.7.0 72c9c2088986 f65efb036188
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (13m) 66s ago 13m 54.6M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (13m) 66s ago 13m 54.2M 4096M 17.2.0 e1d6a67b021e c1c4156caab8
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (13m) 66s ago 13m 49.9M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (13m) 66s ago 13m 53.2M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (13m) 2m ago 13m 55.5M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0
2026-03-09T20:05:48.122 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (13m) 2m ago 13m 52.7M 4096M 17.2.0 e1d6a67b021e cb05c08dd731
2026-03-09T20:05:48.123 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (13m) 2m ago 13m 49.6M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb
2026-03-09T20:05:48.123 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (12m) 2m ago 12m 53.5M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2
2026-03-09T20:05:48.123 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 2m ago 12m 50.4M - 2.51.0 1d3b7f56885b 1ff5c193e113
2026-03-09T20:05:48.123 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (12m) 66s ago 12m 92.7M - 17.2.0 e1d6a67b021e 86ff044460d1
2026-03-09T20:05:48.123 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (12m) 2m ago 12m 91.7M - 17.2.0 e1d6a67b021e ec6c97005ac6
2026-03-09T20:05:48.471 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-09T20:05:48.661 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:48 vm01 ceph-mon[49698]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:48.661 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:48 vm01 ceph-mon[53908]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:48 vm06 ceph-mon[48187]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "mon": {
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": {
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "osd": {
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {},
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": {
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "overall": {
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13,
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-03-09T20:05:49.022 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:05:49.102 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[49698]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[49698]: from='client.24874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2982041466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[53908]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[53908]: from='client.24874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:05:49.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:49 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2982041466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:05:49.732 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-03-09T20:05:49.802 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-09T20:05:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:49 vm06 ceph-mon[48187]: from='client.14907 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:05:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:49 vm06 ceph-mon[48187]: from='client.24874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:05:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:49 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2982041466' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: cluster:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: id: 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: health: HEALTH_OK
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: services:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: mon: 3 daemons, quorum a,c,b (age 14m)
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: mgr: x(active, since 6m), standbys: y
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: osd: 8 osds: 8 up (since 12m), 8 in (since 13m)
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: data:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: pools: 6 pools, 161 pgs
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: objects: 209 objects, 457 KiB
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: usage: 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: pgs: 161 active+clean
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: io:
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr
2026-03-09T20:05:50.381 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:05:50.450 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail'
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[49698]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1100079355' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1980543248' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[53908]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1100079355' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:05:50.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:50 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1980543248' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:05:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:50 vm06 ceph-mon[48187]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:05:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:50 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1100079355' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:05:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:50 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1980543248' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:05:51.550 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180'
2026-03-09T20:05:51.700 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/4040646277' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.700 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.700 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[49698]: osdmap e87: 8 total, 8 up, 8 in
2026-03-09T20:05:51.700 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[49698]: Standby manager daemon x started
2026-03-09T20:05:51.700 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:51 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:51] ENGINE Bus STOPPING
2026-03-09T20:05:51.701 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/4040646277' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.701 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.701 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[53908]: osdmap e87: 8 total, 8 up, 8 in
2026-03-09T20:05:51.701 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:51 vm01 ceph-mon[53908]: Standby manager daemon x started
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:51 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/4040646277' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:51 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:51 vm06 ceph-mon[48187]: osdmap e87: 8 total, 8 up, 8 in
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:51 vm06 ceph-mon[48187]: Standby manager daemon x started
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ignoring --setuser ceph since I am not root
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ignoring --setgroup ceph since I am not root
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:51 vm06 ceph-mgr[66408]: -- 192.168.123.106:0/2686913686 <== mon.2 v2:192.168.123.106:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55e5f4aa74a0 con 0x55e5f4a85000
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:51.589+0000 7f53e944f140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-09T20:05:51.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:51 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:51.632+0000 7f53e944f140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-09T20:05:51.964 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:51 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:51] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:05:51.964 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:51 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:51] ENGINE Bus STOPPED
2026-03-09T20:05:52.329 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:52.043+0000 7f53e944f140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-09T20:05:52.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:51 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:51] ENGINE Bus STARTING
2026-03-09T20:05:52.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:52] ENGINE Serving on http://:::9283
2026-03-09T20:05:52.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:52] ENGINE Bus STARTED
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T20:05:52.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: mgrmap e28: y(active, starting, since 0.479115s), standbys: x
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: Manager daemon y is now available
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:52 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:52.381+0000 7f53e944f140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: from numpy import show_config as show_numpy_config 2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:52.497+0000 7f53e944f140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T20:05:52.594 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:52.535+0000 7f53e944f140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:05:52.799 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: mgrmap e28: y(active, starting, since 0.479115s), standbys: x 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: Manager daemon y is now available 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:05:52.800 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:05:52.802 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: mgrmap e28: y(active, starting, since 0.479115s), standbys: x 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: Manager daemon y is now available 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:05:52.802 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:05:52.802 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:52 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:05:52.912 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:52 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:52.614+0000 7f53e944f140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T20:05:53.460 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.200+0000 7f53e944f140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T20:05:53.460 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.327+0000 7f53e944f140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:05:53.460 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.379+0000 7f53e944f140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T20:05:53.460 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.416+0000 7f53e944f140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T20:05:53.460 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.457+0000 7f53e944f140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T20:05:53.472 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:53 vm01 ceph-mon[49698]: mgrmap e29: y(active, since 1.50389s), standbys: x 2026-03-09T20:05:53.472 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:53 vm01 ceph-mon[49698]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:53.735 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:53 vm06 ceph-mon[48187]: mgrmap e29: y(active, since 1.50389s), standbys: x 2026-03-09T20:05:53.735 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:53 vm06 ceph-mon[48187]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:53.736 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.522+0000 7f53e944f140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:05:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:53 vm01 ceph-mon[53908]: mgrmap e29: y(active, since 1.50389s), standbys: x 2026-03-09T20:05:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:53 vm01 ceph-mon[53908]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:54.068 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.751+0000 7f53e944f140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:05:54.068 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:53 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:53.810+0000 7f53e944f140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:05:54.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.066+0000 7f53e944f140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: [09/Mar/2026:20:05:52] ENGINE Bus STARTING 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: [09/Mar/2026:20:05:53] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: [09/Mar/2026:20:05:53] ENGINE Client ('192.168.123.101', 49386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: [09/Mar/2026:20:05:53] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: [09/Mar/2026:20:05:53] ENGINE Bus STARTED 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: mgrmap e30: y(active, since 2s), standbys: x 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.720 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 
ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:54 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.383+0000 7f53e944f140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.440+0000 7f53e944f140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.491+0000 7f53e944f140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.585+0000 7f53e944f140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.632+0000 7f53e944f140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:05:54.721 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.718+0000 7f53e944f140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: [09/Mar/2026:20:05:52] ENGINE Bus STARTING 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: [09/Mar/2026:20:05:53] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: [09/Mar/2026:20:05:53] ENGINE Client ('192.168.123.101', 49386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: [09/Mar/2026:20:05:53] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: [09/Mar/2026:20:05:53] ENGINE Bus STARTED 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: mgrmap e30: y(active, since 2s), standbys: x 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' 
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: [09/Mar/2026:20:05:52] ENGINE Bus STARTING
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: [09/Mar/2026:20:05:53] ENGINE Serving on https://192.168.123.101:7150
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: [09/Mar/2026:20:05:53] ENGINE Client ('192.168.123.101', 49386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: [09/Mar/2026:20:05:53] ENGINE Serving on http://192.168.123.101:8765
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: [09/Mar/2026:20:05:53] ENGINE Bus STARTED
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: mgrmap e30: y(active, since 2s), standbys: x
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:54.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:54 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:55.013 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:54.846+0000 7f53e944f140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:55.012+0000 7f53e944f140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:05:55.057+0000 7f53e944f140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:05:55] ENGINE Bus STARTING
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: CherryPy Checker:
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: The Application mounted at '' has an empty config.
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]:
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:05:55] ENGINE Serving on http://:::9283
2026-03-09T20:05:55.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:05:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:05:55] ENGINE Bus STARTED
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.520 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Standby manager daemon x restarted
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: Standby manager daemon x started
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Standby manager daemon x restarted
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: Standby manager daemon x started
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:05:55.521 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:55 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Standby manager daemon x restarted
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: Standby manager daemon x started
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:05:55.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:55 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:56.491 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: Stopping Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.491Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.492Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-09T20:05:56.757 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.494Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.495Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.495Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[69742]: ts=2026-03-09T20:05:56.495Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73520]: 2026-03-09 20:05:56.50229009 +0000 UTC m=+0.024642895 container died 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73520]: 2026-03-09 20:05:56.525551929 +0000 UTC m=+0.047904734 container remove 1ff5c193e113607d973110731bbae76b6e8878677cccfa434e9aa1a32991ac14 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 bash[73520]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Deactivated successfully.
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: Stopped Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Consumed 1.024s CPU time.
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:05:56.758 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73588]: 2026-03-09 20:05:56.740084078 +0000 UTC m=+0.028888431 container create 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73588]: 2026-03-09 20:05:56.765977143 +0000 UTC m=+0.054781505 container init 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73588]: 2026-03-09 20:05:56.768948685 +0000 UTC m=+0.057753038 container start 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 bash[73588]: 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 podman[73588]: 2026-03-09 20:05:56.729344246 +0000 UTC m=+0.018148599 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.800Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.800Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.800Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))"
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.800Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.800Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.803Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.803Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.806Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-09T20:05:57.092 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.806Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.807Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.808Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=418.153µs
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.808Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.815Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.820Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.826Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.826Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.826Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=21.541µs wal_replay_duration=18.115716ms wbl_replay_duration=130ns total_replay_duration=18.657681ms
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.831Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.831Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.831Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.841Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.110233ms db_storage=1.232µs remote_storage=1.122µs web_handler=471ns query_engine=832ns scrape=479.647µs scrape_sd=83.627µs notify=7.313µs notify_sd=6.632µs rules=9.227452ms tracing=5.2µs
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.841Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-09T20:05:57.093 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:05:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:05:56.841Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: mgrmap e31: y(active, since 5s), standbys: x
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2453320707' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:56 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:05:57.097 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:56] ENGINE Bus STOPPING
2026-03-09T20:05:57.097 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:57] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:05:57.097 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:57] ENGINE Bus STOPPED
2026-03-09T20:05:57.097 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:56.952Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:57.097 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:05:56.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: mgrmap e31: y(active, since 5s), standbys: x
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2453320707' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: mgrmap e31: y(active, since 5s), standbys: x
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2453320707' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:57.100 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:56 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:05:57.372 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:57] ENGINE Bus STARTING
2026-03-09T20:05:57.372 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:57] ENGINE Serving on http://:::9283
2026-03-09T20:05:57.372 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:05:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:05:57] ENGINE Bus STARTED
2026-03-09T20:05:58.026 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.027 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:57 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: Adding iSCSI gateway http://:@192.168.123.101:5000 to Dashboard 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm01"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T20:05:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:57 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:05:59.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:05:59.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:05:58 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' 
2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:05:59.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:05:58 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:06:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:00 vm01 ceph-mon[49698]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-09T20:06:01.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:00 vm01 ceph-mon[53908]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-09T20:06:01.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:00 vm06 ceph-mon[48187]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-09T20:06:02.874 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:02] "GET /metrics HTTP/1.1" 200 34749 "" "Prometheus/2.51.0"
2026-03-09T20:06:03.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:02 vm01 ceph-mon[49698]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-09T20:06:03.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:02 vm01 ceph-mon[53908]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-09T20:06:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:02 vm06 ceph-mon[48187]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-09T20:06:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:04 vm06 ceph-mon[48187]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:04 vm01 ceph-mon[49698]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:04.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:04 vm01 ceph-mon[53908]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:06 vm06 ceph-mon[48187]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:06.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:06 vm01 ceph-mon[49698]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:06.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:06 vm01 ceph-mon[53908]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:06.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:06:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:06.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:06:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:07 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:06:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:07 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:06:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:07 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:06:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:07 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:06:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:07 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:06:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:07 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:06:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:08 vm06 ceph-mon[48187]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:08.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:08 vm01 ceph-mon[49698]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:08.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:08 vm01 ceph-mon[53908]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:10 vm06 ceph-mon[48187]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-09T20:06:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:10 vm01 ceph-mon[49698]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s
pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:06:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:12 vm06 ceph-mon[48187]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:12.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:12 vm01 ceph-mon[49698]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:12] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-09T20:06:12.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:12 vm01 ceph-mon[53908]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:14.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:14 vm06 ceph-mon[48187]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:14 vm01 ceph-mon[49698]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:14 vm01 ceph-mon[53908]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:16 vm06 ceph-mon[48187]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:16 vm01 ceph-mon[49698]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:16 vm01 ceph-mon[53908]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:16.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:16.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:17 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:17 
vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:17 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:18.787 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:18 vm01 ceph-mon[49698]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:18.787 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:18 vm01 ceph-mon[53908]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:18.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:18 vm06 ceph-mon[48187]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:20.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:20 vm06 ceph-mon[48187]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:20 vm01 ceph-mon[49698]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:20 vm01 ceph-mon[53908]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:22.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:22 vm06 ceph-mon[48187]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:22 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:22] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T20:06:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:22 vm01 ceph-mon[49698]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:22 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:22 vm01 ceph-mon[53908]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:22 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:24.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:24 vm06 ceph-mon[48187]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:24 vm01 ceph-mon[49698]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 
95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:24 vm01 ceph-mon[53908]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:26 vm06 ceph-mon[48187]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:26 vm01 ceph-mon[49698]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:26 vm01 ceph-mon[53908]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:26.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:26.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:27 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:27 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:27 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:28.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:28 vm06 ceph-mon[48187]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:28.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:28 vm01 ceph-mon[49698]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:28.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:28 vm01 ceph-mon[53908]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:30 vm06 ceph-mon[48187]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:30 vm01 ceph-mon[53908]: pgmap v22: 161 pgs: 161 
active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:30 vm01 ceph-mon[49698]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:32] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T20:06:32.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:32 vm01 ceph-mon[53908]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:32 vm01 ceph-mon[49698]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:33.091 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:32 vm06 ceph-mon[48187]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:34.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:34 vm06 ceph-mon[48187]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:34 vm01 ceph-mon[49698]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:34 vm01 ceph-mon[53908]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:36.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:36 vm06 ceph-mon[48187]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:36.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:36 vm01 ceph-mon[49698]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:36.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:36 vm01 ceph-mon[53908]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:36.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:36.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:37 vm06 ceph-mon[48187]: 
from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:37 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:37 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:37 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:37 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:37 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:38.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:38 vm06 ceph-mon[48187]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:38.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:38 vm01 ceph-mon[49698]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:38.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:38 vm01 ceph-mon[53908]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:40 vm06 ceph-mon[48187]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:40.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:40 vm01 ceph-mon[49698]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:40.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:40 vm01 ceph-mon[53908]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:42 vm06 ceph-mon[48187]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:42] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-09T20:06:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:42 vm01 ceph-mon[49698]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:42 vm01 ceph-mon[53908]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:44 vm06 ceph-mon[48187]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB 
used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:44 vm01 ceph-mon[49698]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:44 vm01 ceph-mon[53908]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:46.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:46 vm06 ceph-mon[48187]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:46.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:46 vm01 ceph-mon[49698]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:46.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:46 vm01 ceph-mon[53908]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:46.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:46.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:47 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:47 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:47 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:48 vm06 ceph-mon[48187]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:48.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:48 vm01 ceph-mon[53908]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:48.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:48 vm01 ceph-mon[49698]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:50 vm06 ceph-mon[48187]: pgmap v32: 161 pgs: 161 active+clean; 457 
KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:50 vm01 ceph-mon[53908]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:50 vm01 ceph-mon[49698]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:52 vm06 ceph-mon[48187]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:06:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:06:52] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:06:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:52 vm01 ceph-mon[53908]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:52 vm01 ceph-mon[49698]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:06:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:54 vm06 ceph-mon[48187]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:54.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:54 vm01 ceph-mon[53908]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:54.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:54 vm01 ceph-mon[49698]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:06:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:56 vm06 ceph-mon[48187]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:56.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:56 vm01 ceph-mon[53908]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:56 vm01 ceph-mon[49698]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:56.955Z caller=dispatch.go:352 level=error 
component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:06:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:06:56.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:06:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:57 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:57.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:57 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:57 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:06:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:58 vm06 ceph-mon[48187]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:58.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:58 vm01 ceph-mon[49698]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:58.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:58 vm01 ceph-mon[53908]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:06:59.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:59 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:06:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:59 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:06:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:06:59 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:59 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:59 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:06:59 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:59 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:59 vm01 
ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:06:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:06:59 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y' 2026-03-09T20:07:00.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:00 vm06 ceph-mon[48187]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:00.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:00 vm01 ceph-mon[53908]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:00.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:00 vm01 ceph-mon[49698]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:02.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:02 vm06 ceph-mon[48187]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:02] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T20:07:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:02 vm01 ceph-mon[53908]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:02 vm01 ceph-mon[49698]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:04 vm06 ceph-mon[48187]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:04 vm01 ceph-mon[49698]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:04.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:04 vm01 ceph-mon[53908]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:06 vm06 ceph-mon[48187]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:06.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:06 vm01 ceph-mon[49698]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:06.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:06 vm01 ceph-mon[53908]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:06.956Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no 
such host" 2026-03-09T20:07:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:06.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:07 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:07 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:07 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:07 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:07 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:07 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:08 vm06 ceph-mon[48187]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:08.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:08 vm01 ceph-mon[49698]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:08.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:08 vm01 ceph-mon[53908]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:10 vm06 ceph-mon[48187]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:10 vm01 ceph-mon[49698]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:10 vm01 ceph-mon[53908]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:12 vm06 ceph-mon[48187]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:12] "GET /metrics HTTP/1.1" 200 
37539 "" "Prometheus/2.51.0" 2026-03-09T20:07:12.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:12 vm01 ceph-mon[49698]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:12.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:12 vm01 ceph-mon[53908]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:14.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:14 vm06 ceph-mon[48187]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:14 vm01 ceph-mon[49698]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:14 vm01 ceph-mon[53908]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:16 vm06 ceph-mon[48187]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:16 vm01 ceph-mon[49698]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:16 vm01 ceph-mon[53908]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:16.956Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:16.957Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:17.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:17 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:17 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:17 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:18 vm06 ceph-mon[48187]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:18 vm01 ceph-mon[49698]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:18.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:18 vm01 ceph-mon[53908]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:20 vm06 ceph-mon[48187]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:20 vm01 ceph-mon[49698]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:20 vm01 ceph-mon[53908]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:22 vm06 ceph-mon[48187]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:22 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:22] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:07:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:22 vm01 ceph-mon[49698]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:22 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:22 vm01 ceph-mon[53908]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:22 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:24 vm06 ceph-mon[48187]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:24 vm01 ceph-mon[49698]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:24 vm01 ceph-mon[53908]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:26.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:26 vm06 ceph-mon[48187]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:26.878 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:26 vm01 ceph-mon[49698]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:26 vm01 ceph-mon[53908]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:26.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:26.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:27.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:27 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:27 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:27 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:28.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:28 vm06 ceph-mon[48187]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:28.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:28 vm01 ceph-mon[49698]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:28.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:28 vm01 ceph-mon[53908]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:30.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:30 vm06 ceph-mon[48187]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:30 vm01 ceph-mon[49698]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:30 vm01 ceph-mon[53908]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:32 vm06 ceph-mon[48187]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T20:07:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:32] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T20:07:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:32 vm01 ceph-mon[49698]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:32.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:32 vm01 ceph-mon[53908]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:34.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:34 vm06 ceph-mon[48187]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:34 vm01 ceph-mon[49698]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:34 vm01 ceph-mon[53908]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:36.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:36 vm06 ceph-mon[48187]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:36.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:36 vm01 ceph-mon[49698]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:36.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:36 vm01 ceph-mon[53908]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:36.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:36.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:37 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:37 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:37 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:37 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:37 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:37 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:07:38.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:38 vm06 ceph-mon[48187]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:38.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:38 vm01 ceph-mon[49698]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:38.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:38 vm01 ceph-mon[53908]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:40 vm06 ceph-mon[48187]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:40.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:40 vm01 ceph-mon[49698]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:40.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:40 vm01 ceph-mon[53908]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:42.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:42 vm06 ceph-mon[48187]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:42] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0" 2026-03-09T20:07:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:42 vm01 ceph-mon[49698]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:42 vm01 ceph-mon[53908]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:44.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:44 vm06 ceph-mon[48187]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:44 vm01 ceph-mon[49698]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:44 vm01 ceph-mon[53908]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:46.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:46 vm06 ceph-mon[48187]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:46.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:46 vm01 ceph-mon[49698]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:46.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:46 vm01 ceph-mon[53908]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:07:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:46.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:46.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:07:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:47 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:47 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:47 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:07:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:48 vm06 ceph-mon[48187]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:48.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:48 vm01 ceph-mon[49698]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:48.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:48 vm01 ceph-mon[53908]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:50 vm06 ceph-mon[48187]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:50 vm01 ceph-mon[49698]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:07:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:50 vm01 ceph-mon[53908]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-09T20:07:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:52 vm06 ceph-mon[48187]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:07:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:07:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:07:52] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0"
2026-03-09T20:07:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:52 vm01 ceph-mon[49698]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:07:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:52 vm01 ceph-mon[53908]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:52.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:07:54.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:54 vm01 ceph-mon[49698]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:54.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:54 vm01 ceph-mon[53908]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:54 vm06 ceph-mon[48187]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:56 vm06 ceph-mon[48187]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:56 vm01 ceph-mon[49698]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:56.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:56 vm01 ceph-mon[53908]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:07:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:56.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:07:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:07:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:07:56.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:07:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:57 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:07:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:57 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:07:57.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:57 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:07:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:58 vm06 ceph-mon[48187]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:58.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:58 vm01 ceph-mon[49698]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:58.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:58 vm01 ceph-mon[53908]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:07:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:59 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:07:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:59 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:07:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:07:59 vm06 ceph-mon[48187]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[49698]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:07:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:07:59 vm01 ceph-mon[53908]: from='mgr.24868 ' entity='mgr.y'
2026-03-09T20:08:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:00 vm01 ceph-mon[49698]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
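The "config generate-minimal-conf" and "auth get client.admin" dispatches from mgr.y are the cephadm module refreshing the stripped-down ceph.conf and admin keyring it distributes to managed hosts. Both are ordinary CLI commands and can be run by hand; a minimal sketch:

    # Minimal config (fsid + mon addresses) suitable for copying to other hosts
    ceph config generate-minimal-conf
    # The admin keyring that cephadm pairs with it
    ceph auth get client.admin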
2026-03-09T20:08:01.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:00 vm01 ceph-mon[53908]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:01.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:00 vm06 ceph-mon[48187]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:02] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0"
2026-03-09T20:08:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:02 vm01 ceph-mon[49698]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:02 vm01 ceph-mon[53908]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:02 vm06 ceph-mon[48187]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:04 vm06 ceph-mon[48187]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:04 vm01 ceph-mon[49698]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:04.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:04 vm01 ceph-mon[53908]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:06 vm06 ceph-mon[48187]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:06.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:06 vm01 ceph-mon[49698]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:06.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:06 vm01 ceph-mon[53908]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:06.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:06.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:07 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:07 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:07 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:07 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:07 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:07 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:08.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:08 vm06 ceph-mon[48187]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:08.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:08 vm01 ceph-mon[49698]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:08.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:08 vm01 ceph-mon[53908]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:10 vm06 ceph-mon[48187]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:10 vm01 ceph-mon[49698]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:10 vm01 ceph-mon[53908]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:12 vm06 ceph-mon[48187]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:12] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0"
2026-03-09T20:08:12.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:12 vm01 ceph-mon[49698]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:12.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:12 vm01 ceph-mon[53908]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:14.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:14 vm01 ceph-mon[49698]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:14.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:14 vm01 ceph-mon[53908]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:14.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:14 vm06 ceph-mon[48187]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:16 vm06 ceph-mon[48187]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:16 vm01 ceph-mon[49698]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:16 vm01 ceph-mon[53908]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:16.960Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:16.961Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:17 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:17 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:17 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:18 vm06 ceph-mon[48187]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:18 vm01 ceph-mon[49698]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:18.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:18 vm01 ceph-mon[53908]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:20 vm06 ceph-mon[48187]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:20 vm01 ceph-mon[49698]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:20 vm01 ceph-mon[53908]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:22 vm06 ceph-mon[48187]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:22 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:22] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0"
2026-03-09T20:08:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:22 vm01 ceph-mon[49698]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:22 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:22 vm01 ceph-mon[53908]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:22 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:24.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:24 vm01 ceph-mon[49698]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:24.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:24 vm01 ceph-mon[53908]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:24.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:24 vm06 ceph-mon[48187]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:26 vm06 ceph-mon[48187]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:26 vm01 ceph-mon[49698]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:26 vm01 ceph-mon[53908]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:26.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:26.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:27 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:27 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:27 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:28 vm06 ceph-mon[48187]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:28.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:28 vm01 ceph-mon[49698]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:28.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:28 vm01 ceph-mon[53908]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:30 vm06 ceph-mon[48187]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:30 vm01 ceph-mon[49698]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:30 vm01 ceph-mon[53908]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:32 vm06 ceph-mon[48187]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:32] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0"
2026-03-09T20:08:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:32 vm01 ceph-mon[49698]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:32.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:32 vm01 ceph-mon[53908]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:34.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:34 vm01 ceph-mon[49698]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:34.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:34 vm01 ceph-mon[53908]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:34.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:34 vm06 ceph-mon[48187]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:36.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:36 vm06 ceph-mon[48187]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:36.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:36 vm01 ceph-mon[49698]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:36.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:36 vm01 ceph-mon[53908]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:36.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:36.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:37 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:37 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:37 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:37 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:37 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:37 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:38.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:38 vm06 ceph-mon[48187]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:38.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:38 vm01 ceph-mon[49698]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:38.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:38 vm01 ceph-mon[53908]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:40 vm06 ceph-mon[48187]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:40.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:40 vm01 ceph-mon[49698]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:40.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:40 vm01 ceph-mon[53908]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:42 vm06 ceph-mon[48187]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:42] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0"
2026-03-09T20:08:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:42 vm01 ceph-mon[49698]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:42 vm01 ceph-mon[53908]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:44.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:44 vm01 ceph-mon[49698]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:44.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:44 vm01 ceph-mon[53908]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:44.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:44 vm06 ceph-mon[48187]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:46.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:46 vm06 ceph-mon[48187]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:46.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:46 vm01 ceph-mon[49698]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:46.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:46 vm01 ceph-mon[53908]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:46.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:46.964Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:47 vm06 ceph-mon[48187]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:47 vm01 ceph-mon[49698]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:47 vm01 ceph-mon[53908]: from='client.15021 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:08:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:48 vm06 ceph-mon[48187]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:48.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:48 vm01 ceph-mon[49698]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:48.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:48 vm01 ceph-mon[53908]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:50.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:50 vm06 ceph-mon[48187]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:50.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:50 vm01 ceph-mon[49698]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:50.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:50 vm01 ceph-mon[53908]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:51.872 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-09T20:08:52.326 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T20:08:52.326 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (8m) 2m ago 15m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0
2026-03-09T20:08:52.326 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (8m) 2m ago 15m 48.0M - dad864ee21e9 694e845e2157
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 2m ago 15m 43.5M - 3.5 e1d6a67b021e a6a6edcf5f74
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (11m) 2m ago 17m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (5m) 2m ago 17m 544M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (17m) 2m ago 17m 70.6M 2048M 17.2.0 e1d6a67b021e dac2494fb686
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (17m) 2m ago 17m 50.7M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (17m) 2m ago 17m 51.7M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (8m) 2m ago 15m 10.0M - 1.7.0 72c9c2088986 6e21ac8930a5
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (8m) 2m ago 15m 9.91M - 1.7.0 72c9c2088986 f65efb036188
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (16m) 2m ago 16m 54.7M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (16m) 2m ago 16m 54.7M 4096M 17.2.0 e1d6a67b021e c1c4156caab8
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (16m) 2m ago 16m 50.2M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (16m) 2m ago 16m 53.8M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (16m) 2m ago 16m 56.2M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (16m) 2m ago 16m 52.6M 4096M 17.2.0 e1d6a67b021e cb05c08dd731
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (16m) 2m ago 16m 49.9M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (16m) 2m ago 16m 53.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 2m ago 15m 37.9M - 2.51.0 1d3b7f56885b 3ca8b1f52c41
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (15m) 2m ago 15m 93.3M - 17.2.0 e1d6a67b021e 86ff044460d1
2026-03-09T20:08:52.327 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (15m) 2m ago 15m 92.8M - 17.2.0 e1d6a67b021e ec6c97005ac6
2026-03-09T20:08:52.371 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
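This `ceph orch ps` listing is the checkpoint that matters for the staggered upgrade: mgr.x and mgr.y already report 19.2.3-678-ge911bdeb while the mons, OSDs, RGWs and the iSCSI daemon are still on 17.2.0, i.e. only the managers have been upgraded so far. For scripting the same check, the listing is also available as JSON; a sketch assuming jq is on the host and using the daemon_type/daemon_id/version field names of the JSON form:

    # One "type.id version" line per daemon, sorted for easy diffing
    ceph orch ps --format json | jq -r '.[] | "\(.daemon_type).\(.daemon_id) \(.version)"' | sort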
2026-03-09T20:08:52.620 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:52 vm01 ceph-mon[49698]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:52.620 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:52 vm01 ceph-mon[49698]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:52.620 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:08:52] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0"
2026-03-09T20:08:52.621 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:52 vm01 ceph-mon[53908]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:52.621 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:52 vm01 ceph-mon[53908]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "mon": {
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    },
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "mgr": {
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    },
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "osd": {
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    },
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "mds": {},
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "rgw": {
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    },
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    "overall": {
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13,
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:    }
2026-03-09T20:08:52.834 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:08:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:52 vm06 ceph-mon[48187]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:08:52.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:52 vm06 ceph-mon[48187]: from='mgr.24868 192.168.123.101:0/684927059' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
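The `ceph versions` output confirms the same split from the cluster's point of view: two squid mgr daemons against thirteen quincy daemons overall. A compact one-liner to summarize it, as a sketch (again assuming jq):

    # Count of daemons per version string across all components
    ceph versions | jq -r '.overall | to_entries[] | "\(.value)x \(.key)"'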
2026-03-09T20:08:52.876 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:  cluster:
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    id:     0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    health: HEALTH_OK
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:  services:
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    mon: 3 daemons, quorum a,c,b (age 17m)
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    mgr: y(active, since 3m), standbys: x
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    osd: 8 osds: 8 up (since 15m), 8 in (since 16m)
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:  data:
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    pools:   6 pools, 161 pgs
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    objects: 209 objects, 457 KiB
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    usage:   95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:08:53.330 INFO:teuthology.orchestra.run.vm01.stdout:    pgs:     161 active+clean
2026-03-09T20:08:53.331 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:08:53.331 INFO:teuthology.orchestra.run.vm01.stdout:  io:
2026-03-09T20:08:53.331 INFO:teuthology.orchestra.run.vm01.stdout:    client: 853 B/s rd, 0 op/s rd, 0 op/s wr
2026-03-09T20:08:53.331 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-09T20:08:53.394 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-09T20:08:53.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:53 vm06 ceph-mon[48187]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:08:53.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:53 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1486516514' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:53 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1251358578' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:08:53.847 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[49698]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:08:53.847 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1486516514' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:53.847 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1251358578' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:08:53.848 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[53908]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:08:53.848 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1486516514' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:53.848 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:53 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1251358578' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-09T20:08:53.883 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-03-09T20:08:53.932 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"''
2026-03-09T20:08:54.511 INFO:teuthology.orchestra.run.vm01.stdout:true
2026-03-09T20:08:54.551 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail'
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[49698]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3150166822' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3081000679' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[53908]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3150166822' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:08:54.705 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:54 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3081000679' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:54 vm06 ceph-mon[48187]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:08:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:54 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3150166822' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
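The `jq -e '.mgr | length == 1'` step is the test's assertion that every mgr daemon now reports the same (upgraded) version: jq's -e flag makes the boolean result the exit status, so the pipeline doubles as a pass/fail check. Only then does the test issue `ceph mgr fail`, which forces the active mgr to stand down so a standby takes over. A standalone sketch of the same pattern:

    # Abort unless all mgr daemons are on a single version
    ceph versions | jq -e '.mgr | length == 1' >/dev/null || exit 1
    # Hand control to a standby mgr
    ceph mgr fail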
2026-03-09T20:08:54.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:54 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3081000679' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:08:55.677 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180'
2026-03-09T20:08:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:55 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/247809767' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:08:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:55 vm06 ceph-mon[48187]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:08:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:55 vm06 ceph-mon[48187]: osdmap e88: 8 total, 8 up, 8 in
2026-03-09T20:08:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:55 vm06 ceph-mon[48187]: Standby manager daemon y started
2026-03-09T20:08:55.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:55] ENGINE Bus STOPPING
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/247809767' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[53908]: osdmap e88: 8 total, 8 up, 8 in
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[53908]: Standby manager daemon y started
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/247809767' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
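After issuing the failover the test parks in `sleep 180` to let it settle. Where a fixed sleep is too coarse, polling the mgr map is an alternative; a sketch assuming jq and the available/active_name fields of `ceph mgr stat`:

    # Wait until some mgr is active again, then report which one
    until ceph mgr stat -f json | jq -e '.available' >/dev/null; do sleep 5; done
    ceph mgr stat -f json | jq -r '.active_name'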
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[49698]: osdmap e88: 8 total, 8 up, 8 in
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:55 vm01 ceph-mon[49698]: Standby manager daemon y started
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:55.593+0000 7f76105fa640 -1 mgr handle_mgr_map I was active but no longer am
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ignoring --setuser ceph since I am not root
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ignoring --setgroup ceph since I am not root
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:55.735+0000 7f7a9a4b4140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-09T20:08:55.846 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:55.779+0000 7f7a9a4b4140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-09T20:08:56.197 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:55] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:08:56.197 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:55] ENGINE Bus STOPPED
2026-03-09T20:08:56.197 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:55 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:55] ENGINE Bus STARTING
2026-03-09T20:08:56.197 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:56] ENGINE Serving on http://:::9283
2026-03-09T20:08:56.197 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:08:56] ENGINE Bus STARTED
2026-03-09T20:08:56.561 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:56.210+0000 7f7a9a4b4140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-09T20:08:56.561 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:56.558+0000 7f7a9a4b4140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.838Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.839Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.841Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.841Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.842Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:08:56 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:08:56.842Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.101:8765: connect: connection refused"
2026-03-09T20:08:56.857 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-09T20:08:56.857 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-09T20:08:56.857 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-09T20:08:56.857 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
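The Prometheus errors above are a side effect of the failover rather than an independent fault: its HTTP service discovery polls the cephadm endpoint on port 8765 of the previously active mgr (mgr.y on 192.168.123.101), which refuses connections while that daemon restarts; the scrape configs recover once an active mgr is serving the endpoint again. The endpoint can be probed directly, as a sketch reusing a URL from the log:

    # -f: fail on HTTP errors, so the exit status reflects endpoint health
    curl -fsS 'http://192.168.123.101:8765/sd/prometheus/sd-config?service=node-exporter'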
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: mgrmap e32: x(active, starting, since 0.533364s), standbys: y 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: Manager daemon x is now available 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T20:08:56.858 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:56 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: mgrmap e32: x(active, starting, since 0.533364s), standbys: y
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: Manager daemon x is now available
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-09T20:08:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: from numpy import show_config as show_numpy_config
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:56.687+0000 7f7a9a4b4140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:56.727+0000 7f7a9a4b4140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:56.809+0000 7f7a9a4b4140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-09T20:08:56.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: mgrmap e32: x(active, starting, since 0.533364s), standbys: y
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: Manager daemon x is now available
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-09T20:08:56.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:56 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-09T20:08:57.359 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:56.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:57.359 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:08:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:08:56.964Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:08:57.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.374+0000 7f7a9a4b4140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-09T20:08:57.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.501+0000 7f7a9a4b4140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-09T20:08:57.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.548+0000 7f7a9a4b4140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-09T20:08:57.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.586+0000 7f7a9a4b4140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-09T20:08:57.923 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.636+0000 7f7a9a4b4140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-09T20:08:57.923 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.705+0000 7f7a9a4b4140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-09T20:08:57.923 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: mgrmap e33: x(active, since 1.56107s), standbys: y
2026-03-09T20:08:57.923 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: [09/Mar/2026:20:08:56] ENGINE Bus STARTING
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: [09/Mar/2026:20:08:57] ENGINE Serving on http://192.168.123.106:8765
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: [09/Mar/2026:20:08:57] ENGINE Serving on https://192.168.123.106:7150
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: [09/Mar/2026:20:08:57] ENGINE Bus STARTED
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: [09/Mar/2026:20:08:57] ENGINE Client ('192.168.123.106', 57634) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: mgrmap e33: x(active, since 1.56107s), standbys: y
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: [09/Mar/2026:20:08:56] ENGINE Bus STARTING
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: [09/Mar/2026:20:08:57] ENGINE Serving on http://192.168.123.106:8765
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: [09/Mar/2026:20:08:57] ENGINE Serving on https://192.168.123.106:7150
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: [09/Mar/2026:20:08:57] ENGINE Bus STARTED
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: [09/Mar/2026:20:08:57] ENGINE Client ('192.168.123.106', 57634) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:57.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:57 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: mgrmap e33: x(active, since 1.56107s), standbys: y
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: [09/Mar/2026:20:08:56] ENGINE Bus STARTING
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: [09/Mar/2026:20:08:57] ENGINE Serving on http://192.168.123.106:8765
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: [09/Mar/2026:20:08:57] ENGINE Serving on https://192.168.123.106:7150
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: [09/Mar/2026:20:08:57] ENGINE Bus STARTED
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: [09/Mar/2026:20:08:57] ENGINE Client ('192.168.123.106', 57634) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:57 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.212 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.919+0000 7f7a9a4b4140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-09T20:08:58.212 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:57.976+0000 7f7a9a4b4140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-09T20:08:58.212 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.211+0000 7f7a9a4b4140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-09T20:08:58.502 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.502+0000 7f7a9a4b4140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: mgrmap e34: x(active, since 2s), standbys: y
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:08:58.784 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.539+0000 7f7a9a4b4140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.580+0000 7f7a9a4b4140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.664+0000 7f7a9a4b4140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.703+0000 7f7a9a4b4140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.782+0000 7f7a9a4b4140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: mgrmap e34: x(active, since 2s), standbys: y
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:08:58.785 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:58 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:08:59.044 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:58.897+0000 7f7a9a4b4140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-09T20:08:59.044 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:59.043+0000 7f7a9a4b4140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: mgrmap e34: x(active, since 2s), standbys: y
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:08:59.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:08:59.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf
2026-03-09T20:08:59.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:58 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:08:59.092+0000 7f7a9a4b4140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:08:59] ENGINE Bus STARTING
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: CherryPy Checker:
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: The Application mounted at '' has an empty config.
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]:
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:08:59] ENGINE Serving on http://:::9283
2026-03-09T20:08:59.298 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:08:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:08:59] ENGINE Bus STARTED
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Standby manager daemon y restarted
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Standby manager daemon y started
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:09:00.213 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:09:00.214 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.214 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:08:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Standby manager daemon y restarted
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Standby manager daemon y started
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Standby manager daemon y restarted
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Standby manager daemon y started
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-09T20:09:00.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:08:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 systemd[1]: Stopping Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.335Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.336Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.337Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.338Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.338Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[73598]: ts=2026-03-09T20:09:00.338Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75714]: 2026-03-09 20:09:00.346323434 +0000 UTC m=+0.023387075 container died 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75714]: 2026-03-09 20:09:00.360672762 +0000 UTC m=+0.037736403 container remove 3ca8b1f52c41587b5ae235941294c192d6801c074946640ca624eee007cdc67f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 bash[75714]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Deactivated successfully.
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 systemd[1]: Stopped Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:09:00.520 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:09:00.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:00] ENGINE Bus STOPPING
2026-03-09T20:09:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75781]: 2026-03-09 20:09:00.520771365 +0000 UTC m=+0.019790112 container create 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75781]: 2026-03-09 20:09:00.553107448 +0000 UTC m=+0.052126205 container init 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75781]: 2026-03-09 20:09:00.555586147 +0000 UTC m=+0.054604904 container start 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors )
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 bash[75781]: 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 podman[75781]: 2026-03-09 20:09:00.512550789 +0000 UTC m=+0.011569546 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.581Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.581Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.581Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.581Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.581Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.583Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.584Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.585Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.585Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.586Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.586Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=349.756µs
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.586Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.599Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=4
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.608Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=4
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.622Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=4
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.631Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=4
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.631Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=4
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.631Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=79.659µs wal_replay_duration=44.650711ms wbl_replay_duration=130ns total_replay_duration=45.102658ms
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.634Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.634Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.634Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.647Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=13.423686ms db_storage=671ns remote_storage=1.323µs web_handler=351ns query_engine=591ns scrape=597.317µs scrape_sd=90.961µs notify=6.332µs notify_sd=5.681µs rules=12.465472ms tracing=4.628µs
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.647Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-09T20:09:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:09:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:09:00.647Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-09T20:09:01.198 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:01] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:01] ENGINE Bus STOPPED
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:01] ENGINE Bus STARTING
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:01] ENGINE Serving on http://:::9283
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:09:01] ENGINE Bus STARTED
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: mgrmap e35: x(active, since 5s), standbys: y
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3969100634' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3951578398' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-09T20:09:01.199 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:01 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:09:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:09:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:09:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:09:01.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: mgrmap e35: x(active, since 5s), standbys: y
2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3969100634' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3951578398' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch
2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='client.?
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: Reconfiguring daemon prometheus.a on vm06 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: mgrmap e35: x(active, since 5s), standbys: y 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3969100634' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3951578398' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T20:09:01.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:01 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]': finished 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:09:02.087 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]': finished 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:09:02.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/1759319689"}]': finished 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:09:02.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:09:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:03 vm06 ceph-mon[48187]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:09:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:03 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]': finished 2026-03-09T20:09:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:03 vm06 ceph-mon[48187]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T20:09:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:03 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]: dispatch 2026-03-09T20:09:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[49698]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]': finished 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[49698]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]: dispatch 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[53908]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1597861542' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3824961904"}]': finished 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[53908]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T20:09:03.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:03 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]: dispatch 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]': finished 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[49698]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/519645487' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]': finished 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[53908]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/519645487' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:04 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:04 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/3296731005' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/802941014"}]': finished 2026-03-09T20:09:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:04 vm06 ceph-mon[48187]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T20:09:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:04 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/519645487' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:04 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]: dispatch 2026-03-09T20:09:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:05 vm06 ceph-mon[48187]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T20:09:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:05 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]': finished 2026-03-09T20:09:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:05 vm06 ceph-mon[48187]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T20:09:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:05 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/761936911' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:05 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[49698]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]': finished 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[49698]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/761936911' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[53908]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2739345839"}]': finished 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[53908]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/761936911' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:05 vm01 ceph-mon[53908]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]: dispatch 2026-03-09T20:09:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:06 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]': finished 2026-03-09T20:09:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:06 vm06 ceph-mon[48187]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T20:09:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:06 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2077860220' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:06.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:06 vm06 ceph-mon[48187]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]': finished 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[49698]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2077860220' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/802941014"}]': finished 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[53908]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2077860220' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:06.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:06 vm01 ceph-mon[53908]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]: dispatch 2026-03-09T20:09:07.155 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:07 vm06 ceph-mon[48187]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-09T20:09:07.155 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:07 vm06 ceph-mon[48187]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]': finished 2026-03-09T20:09:07.160 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:07 vm06 ceph-mon[48187]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T20:09:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:06.965Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:06.966Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[49698]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[49698]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]': finished 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[49698]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[53908]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[53908]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/2079158361"}]': finished 2026-03-09T20:09:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:07 vm01 ceph-mon[53908]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T20:09:09.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:09 vm01 ceph-mon[49698]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:09:09.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:09 vm01 ceph-mon[53908]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:09:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:09 vm06 ceph-mon[48187]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[49698]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[53908]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:11.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:11 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:11 vm06 ceph-mon[48187]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:11 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:11 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:13.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:13 vm06 ceph-mon[48187]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T20:09:13.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:13 vm01 ceph-mon[49698]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T20:09:13.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:13 vm01 ceph-mon[53908]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T20:09:14.842 
INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:09:14] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T20:09:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:15 vm06 ceph-mon[48187]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:15 vm01 ceph-mon[49698]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:15 vm01 ceph-mon[53908]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:16.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:16.966Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:17 vm06 ceph-mon[48187]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:09:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:17 vm01 ceph-mon[49698]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:09:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:17 vm01 ceph-mon[53908]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:09:19.644 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:19 vm06 ceph-mon[48187]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:09:19.681 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:19 vm01 ceph-mon[49698]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:09:19.681 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:19 vm01 ceph-mon[53908]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:09:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:21 vm06 ceph-mon[48187]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:21 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-09T20:09:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:21 vm01 ceph-mon[49698]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:21 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:21 vm01 ceph-mon[53908]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:21 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:23 vm06 ceph-mon[48187]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:23.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:23 vm01 ceph-mon[49698]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:23.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:23 vm01 ceph-mon[53908]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:09:24] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T20:09:25.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:25 vm06 ceph-mon[48187]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:25 vm01 ceph-mon[49698]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:25 vm01 ceph-mon[53908]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:26 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:26 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:26 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:09:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:26.967Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 
192.168.123.1:53: no such host" 2026-03-09T20:09:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:26.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:27.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:27 vm06 ceph-mon[48187]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:27 vm01 ceph-mon[49698]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:27 vm01 ceph-mon[53908]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:29 vm06 ceph-mon[48187]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:29 vm01 ceph-mon[49698]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:29 vm01 ceph-mon[53908]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:31 vm06 ceph-mon[48187]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:31 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:31 vm01 ceph-mon[49698]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:31 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:31.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:31 vm01 ceph-mon[53908]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:31.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:31 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:09:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:33 vm06 ceph-mon[48187]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:33.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:33 vm01 ceph-mon[49698]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:33.878 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:33 vm01 ceph-mon[53908]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:09:34] "GET /metrics HTTP/1.1" 200 37542 "" "Prometheus/2.51.0" 2026-03-09T20:09:35.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:35 vm06 ceph-mon[48187]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:35.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:35 vm01 ceph-mon[49698]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:35.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:35 vm01 ceph-mon[53908]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:36.967Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:36.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:09:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:37 vm06 ceph-mon[48187]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:37 vm01 ceph-mon[49698]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:37 vm01 ceph-mon[53908]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:09:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:39 vm06 ceph-mon[48187]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:39 vm01 ceph-mon[49698]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:39 vm01 ceph-mon[53908]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:09:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:41 vm06 ceph-mon[48187]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T20:09:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:41 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:41 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[49698]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[53908]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:41 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:43 vm06 ceph-mon[48187]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:43 vm01 ceph-mon[49698]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:43.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:43 vm01 ceph-mon[53908]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:09:44] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0"
2026-03-09T20:09:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:45 vm06 ceph-mon[48187]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:45 vm01 ceph-mon[49698]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:45 vm01 ceph-mon[53908]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:46.969Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:09:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:46.969Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:09:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:47 vm06 ceph-mon[48187]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:47 vm01 ceph-mon[49698]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:47 vm01 ceph-mon[53908]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:49.827 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:49 vm01 ceph-mon[49698]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:49.828 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:49 vm01 ceph-mon[53908]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:49 vm06 ceph-mon[48187]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:51 vm06 ceph-mon[48187]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:51 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:51 vm01 ceph-mon[49698]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:51 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:51 vm01 ceph-mon[53908]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:51 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:09:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:53 vm06 ceph-mon[48187]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:53.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:53 vm01 ceph-mon[49698]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:53 vm01 ceph-mon[53908]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:09:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:09:54] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0"
2026-03-09T20:09:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:55 vm06 ceph-mon[48187]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:55.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:55 vm01 ceph-mon[49698]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:55.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:55 vm01 ceph-mon[53908]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:56.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:09:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:56.970Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:09:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:09:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:09:56.970Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:09:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:57 vm06 ceph-mon[48187]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:57 vm01 ceph-mon[49698]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:57.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:57 vm01 ceph-mon[53908]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:09:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:09:59 vm06 ceph-mon[48187]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:09:59 vm01 ceph-mon[49698]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:09:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:09:59 vm01 ceph-mon[53908]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:00.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:00 vm06 ceph-mon[48187]: overall HEALTH_OK
2026-03-09T20:10:00.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:00 vm01 ceph-mon[49698]: overall HEALTH_OK
2026-03-09T20:10:00.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:00 vm01 ceph-mon[53908]: overall HEALTH_OK
2026-03-09T20:10:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:01 vm06 ceph-mon[48187]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:01 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:01.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:01 vm01 ceph-mon[49698]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:01.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:01 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:01.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:01 vm01 ceph-mon[53908]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:01.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:01 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:02 vm06 ceph-mon[48187]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:10:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:10:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:10:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[49698]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:10:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:10:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:10:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[53908]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:02.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:10:02.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:10:02.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:10:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:04] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0"
2026-03-09T20:10:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:05 vm06 ceph-mon[48187]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:05.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:05 vm01 ceph-mon[49698]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:05.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:05 vm01 ceph-mon[53908]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:06.971Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:06.971Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:07 vm06 ceph-mon[48187]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:07 vm01 ceph-mon[49698]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:07 vm01 ceph-mon[53908]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:09 vm06 ceph-mon[48187]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:09 vm01 ceph-mon[49698]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:09 vm01 ceph-mon[53908]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:11 vm06 ceph-mon[48187]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:11 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:11 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[49698]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[53908]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:11 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:13 vm06 ceph-mon[48187]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:13 vm01 ceph-mon[49698]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:13 vm01 ceph-mon[53908]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:14] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0"
2026-03-09T20:10:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:15 vm06 ceph-mon[48187]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:15 vm01 ceph-mon[49698]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:15 vm01 ceph-mon[53908]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:16.971Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:16.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:17 vm06 ceph-mon[48187]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:17 vm01 ceph-mon[49698]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:17 vm01 ceph-mon[53908]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:19 vm06 ceph-mon[48187]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:19.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:19 vm01 ceph-mon[49698]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:19.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:19 vm01 ceph-mon[53908]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:21 vm06 ceph-mon[48187]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:21 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:21 vm01 ceph-mon[49698]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:21 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:21 vm01 ceph-mon[53908]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:21 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:23 vm06 ceph-mon[48187]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:23.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:23 vm01 ceph-mon[49698]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:23.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:23 vm01 ceph-mon[53908]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:24] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0"
2026-03-09T20:10:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:25 vm06 ceph-mon[48187]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:25 vm01 ceph-mon[49698]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:25 vm01 ceph-mon[53908]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:26 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:26 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:26 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:26.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:26.973Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:27.841 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:27 vm06 ceph-mon[48187]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:27 vm01 ceph-mon[49698]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:27 vm01 ceph-mon[53908]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:29 vm06 ceph-mon[48187]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:29 vm01 ceph-mon[49698]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:29 vm01 ceph-mon[53908]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:31 vm06 ceph-mon[48187]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:31 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:31 vm01 ceph-mon[49698]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:31 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:31.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:31 vm01 ceph-mon[53908]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:31.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:31 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:33 vm06 ceph-mon[48187]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:33.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:33 vm01 ceph-mon[49698]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:33.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:33 vm01 ceph-mon[53908]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:34] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0"
2026-03-09T20:10:35.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:35 vm06 ceph-mon[48187]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:35.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:35 vm01 ceph-mon[49698]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:35.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:35 vm01 ceph-mon[53908]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:36.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:36.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:37 vm06 ceph-mon[48187]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:37 vm01 ceph-mon[49698]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:37 vm01 ceph-mon[53908]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:39 vm06 ceph-mon[48187]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:39 vm01 ceph-mon[49698]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:39 vm01 ceph-mon[53908]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:41 vm06 ceph-mon[48187]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:41 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:41 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[49698]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[53908]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:41 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:43 vm06 ceph-mon[48187]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:43 vm01 ceph-mon[49698]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:43.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:43 vm01 ceph-mon[53908]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:44] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0"
2026-03-09T20:10:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:45 vm06 ceph-mon[48187]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:45 vm01 ceph-mon[49698]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:45 vm01 ceph-mon[53908]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:46.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:46.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:47 vm06 ceph-mon[48187]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:47 vm01 ceph-mon[49698]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:47 vm01 ceph-mon[53908]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:49 vm06 ceph-mon[48187]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:49.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:49 vm01 ceph-mon[49698]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:49.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:49 vm01 ceph-mon[53908]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:51 vm06 ceph-mon[48187]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:51 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:51 vm01 ceph-mon[49698]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:51 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:51 vm01 ceph-mon[53908]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:51 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:10:52.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:52 vm01 ceph-mon[49698]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:52.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:52 vm01 ceph-mon[53908]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:52 vm06 ceph-mon[48187]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:10:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:10:54] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0"
2026-03-09T20:10:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:55 vm06 ceph-mon[48187]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:55.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:55 vm01 ceph-mon[49698]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:55.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:55 vm01 ceph-mon[53908]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:56.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:56.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:10:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:56.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:10:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:10:56.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:10:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:57 vm06 ceph-mon[48187]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:57 vm01 ceph-mon[49698]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:57.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:57 vm01 ceph-mon[53908]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:10:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:10:59 vm06 ceph-mon[48187]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:10:59 vm01 ceph-mon[49698]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:10:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:10:59 vm01 ceph-mon[53908]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:01 vm06 ceph-mon[48187]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:01 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:01.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:01 vm01 ceph-mon[49698]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:01.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:01 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:01.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:01 vm01 ceph-mon[53908]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:01.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:01 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:02 vm06 ceph-mon[48187]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:11:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:02 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:11:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:02 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[49698]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[53908]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:11:02.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:02 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x'
2026-03-09T20:11:04.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:11:04] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0"
2026-03-09T20:11:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:05 vm06 ceph-mon[48187]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:05.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:05 vm01 ceph-mon[49698]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:05.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:05 vm01 ceph-mon[53908]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:06.975Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:06.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:07 vm06 ceph-mon[48187]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:07 vm01 ceph-mon[49698]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:07 vm01 ceph-mon[53908]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:09 vm06 ceph-mon[48187]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:09 vm01 ceph-mon[49698]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:09 vm01 ceph-mon[53908]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:11 vm06 ceph-mon[48187]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:11 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:11 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[49698]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[53908]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:11 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:13 vm06 ceph-mon[48187]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:13 vm01 ceph-mon[49698]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:13 vm01 ceph-mon[53908]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:14.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:14 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:11:14] "GET /metrics HTTP/1.1" 200 37551 "" "Prometheus/2.51.0"
2026-03-09T20:11:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:15 vm06 ceph-mon[48187]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:15 vm01 ceph-mon[49698]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:15 vm01 ceph-mon[53908]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:16.975Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:16.976Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:17 vm06 ceph-mon[48187]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:17 vm01 ceph-mon[49698]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:17 vm01 ceph-mon[53908]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:19 vm06 ceph-mon[48187]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:19.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:19 vm01 ceph-mon[49698]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:19.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:19 vm01 ceph-mon[53908]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:20 vm01 ceph-mon[49698]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:20 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:20 vm01 ceph-mon[53908]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:20 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:20 vm06 ceph-mon[48187]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:20 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:22 vm06 ceph-mon[48187]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:23.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:22 vm01 ceph-mon[49698]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:23.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:22 vm01 ceph-mon[53908]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:24.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:24 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:11:24] "GET /metrics HTTP/1.1" 200 37551 "" "Prometheus/2.51.0"
2026-03-09T20:11:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:25 vm06 ceph-mon[48187]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:25 vm01 ceph-mon[49698]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:25 vm01 ceph-mon[53908]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:26 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:26 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:26 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:26.976Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:26.977Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:27 vm06 ceph-mon[48187]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:27 vm01 ceph-mon[49698]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:27 vm01 ceph-mon[53908]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:29 vm06 ceph-mon[48187]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:29 vm01 ceph-mon[49698]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:29 vm01 ceph-mon[53908]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:31 vm06 ceph-mon[48187]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:31 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:31 vm01 ceph-mon[49698]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:31.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:31 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:31.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:31 vm01 ceph-mon[53908]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:31.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:31 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:32 vm01 ceph-mon[49698]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:32.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:32 vm01 ceph-mon[53908]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:33.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:32 vm06 ceph-mon[48187]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:34.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:11:34] "GET /metrics HTTP/1.1" 200 37551 "" "Prometheus/2.51.0"
2026-03-09T20:11:35.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:35 vm06 ceph-mon[48187]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:35.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:35 vm01 ceph-mon[49698]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:35.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:35 vm01 ceph-mon[53908]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:36.978Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:36.979Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:37 vm06 ceph-mon[48187]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:37 vm01 ceph-mon[49698]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:37.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:37 vm01 ceph-mon[53908]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:39 vm06 ceph-mon[48187]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:39 vm01 ceph-mon[49698]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:39 vm01 ceph-mon[53908]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:40 vm06 ceph-mon[48187]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:40 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:40.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:40 vm01 ceph-mon[49698]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:40.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:40 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:40.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:40 vm01 ceph-mon[53908]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:40.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:40 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:11:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:41 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:41 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:41 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:11:42.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:42 vm06 ceph-mon[48187]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:42 vm01 ceph-mon[49698]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:42 vm01 ceph-mon[53908]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:11:44.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:44 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:11:44] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0"
2026-03-09T20:11:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:45 vm06 ceph-mon[48187]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:45 vm01 ceph-mon[49698]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:45 vm01 ceph-mon[53908]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:11:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:46.978Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:11:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:46.980Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify
attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:11:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:47 vm06 ceph-mon[48187]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:47 vm01 ceph-mon[49698]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:47.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:47 vm01 ceph-mon[53908]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:49 vm06 ceph-mon[48187]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:49.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:49 vm01 ceph-mon[49698]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:49.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:49 vm01 ceph-mon[53908]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:51 vm06 ceph-mon[48187]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:51.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:51 vm06 ceph-mon[48187]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:11:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:51 vm01 ceph-mon[49698]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:51.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:51 vm01 ceph-mon[49698]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:11:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:51 vm01 ceph-mon[53908]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:51.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:51 vm01 ceph-mon[53908]: from='client.24881 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:11:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:53 vm06 ceph-mon[48187]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:53 vm01 ceph-mon[53908]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:53.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:53 vm01 ceph-mon[49698]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:54.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:11:54 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 
::ffff:192.168.123.106 - - [09/Mar/2026:20:11:54] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0" 2026-03-09T20:11:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:55 vm06 ceph-mon[48187]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:55.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:55 vm01 ceph-mon[49698]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:55.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:55 vm01 ceph-mon[53908]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:56.075 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:11:56.569 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:56 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (12m) 2m ago 18m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (11m) 2m ago 18m 48.0M - dad864ee21e9 694e845e2157 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 2m ago 18m 69.4M - 3.5 e1d6a67b021e 361be1660694 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (14m) 2m ago 20m 545M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (8m) 2m ago 20m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (20m) 2m ago 21m 73.1M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (20m) 2m ago 20m 51.3M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (20m) 2m ago 20m 52.6M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (12m) 2m ago 18m 10.1M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (11m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (20m) 2m ago 20m 55.2M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (19m) 2m ago 19m 54.8M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (19m) 2m ago 19m 50.3M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (19m) 2m ago 19m 54.0M 
4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (19m) 2m ago 19m 56.2M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (19m) 2m ago 19m 52.8M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (19m) 2m ago 19m 50.2M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (19m) 2m ago 19m 54.0M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 2m ago 18m 44.0M - 2.51.0 1d3b7f56885b 25d68717ab9d 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (18m) 2m ago 18m 94.5M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:11:56.630 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (18m) 2m ago 18m 93.8M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:11:56.697 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls' 2026-03-09T20:11:56.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:56 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:11:56.862 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:56 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:11:57.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:56.979Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:11:57.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:11:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:11:56.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:11:57.222 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 2m ago 18m vm01=a;count:1 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 2m ago 18m vm06=a;count:1 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo ?:5000 1/1 2m ago 18m count:1 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 2m ago 20m vm01=y;vm06=x;count:2 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:mon 3/3 2m ago 20m 
vm01:192.168.123.101=a;vm01:[v2:192.168.123.101:3301,v1:192.168.123.101:6790]=c;vm06:192.168.123.106=b;count:3 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 2m ago 18m vm01=a;vm06=b;count:2 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:osd 8 2m ago - 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 2m ago 18m vm06=a;count:1 2026-03-09T20:11:57.223 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo ?:8000 2/2 2m ago 18m count:2 2026-03-09T20:11:57.274 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T20:11:57.826 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[49698]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:57.827 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[49698]: from='client.15114 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.827 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[49698]: from='client.24911 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.827 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[53908]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:57.827 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[53908]: from='client.15114 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.827 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:57 vm01 ceph-mon[53908]: from='client.24911 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 
2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:11:57.827 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:11:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:57 vm06 ceph-mon[48187]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:11:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:57 vm06 ceph-mon[48187]: from='client.15114 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:57 vm06 ceph-mon[48187]: from='client.24911 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:57.904 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr' 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[49698]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2437494981' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[49698]: from='client.25051 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[53908]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2437494981' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:11:58.724 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:58 vm01 ceph-mon[53908]: from='client.25051 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:58 vm06 ceph-mon[48187]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:11:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:58 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/2437494981' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:11:58.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:58 vm06 ceph-mon[48187]: from='client.25051 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:11:58.874 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:11:58.928 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:11:59.540 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (12m) 2m ago 18m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (11m) 2m ago 18m 48.0M - dad864ee21e9 694e845e2157 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (3m) 2m ago 18m 69.4M - 3.5 e1d6a67b021e 361be1660694 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283 running (14m) 2m ago 20m 545M - 19.2.3-678-ge911bdeb 654f31e6858e 0b7c9ce0e145 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (8m) 2m ago 21m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (21m) 2m ago 21m 73.1M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (20m) 2m ago 20m 51.3M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (20m) 2m ago 20m 52.6M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (12m) 2m ago 18m 10.1M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (11m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (20m) 2m ago 20m 55.2M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (19m) 2m ago 19m 54.8M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (19m) 2m ago 19m 50.3M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (19m) 2m ago 19m 54.0M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (19m) 
2m ago 19m 56.2M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (19m) 2m ago 19m 52.8M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (19m) 2m ago 19m 50.2M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (19m) 2m ago 19m 54.0M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 2m ago 18m 44.0M - 2.51.0 1d3b7f56885b 25d68717ab9d 2026-03-09T20:11:59.954 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (18m) 2m ago 18m 94.5M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:11:59.955 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (18m) 2m ago 18m 93.8M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: Failing over to other MGR 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T20:12:00.129 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[49698]: from='client.15126 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: Failing over to other MGR 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:12:00.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:11:59 vm01 ceph-mon[53908]: from='client.15126 -' entity='client.admin' cmd=[{"prefix": "orch upgrade 
status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:12:00.193 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:12:00.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests 
['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc']
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: Failing over to other MGR
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 192.168.123.106:0/233250659' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: osdmap e95: 8 total, 8 up, 8 in
2026-03-09T20:12:00.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:11:59 vm06 ceph-mon[48187]: from='client.15126 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:12:00.403 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:12:00.403 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T20:12:00.403 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true,
2026-03-09T20:12:00.403 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) mgr",
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: "mgr"
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: ],
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "2/2 daemons upgraded",
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image",
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false
2026-03-09T20:12:00.404 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:12:00.459 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:00] ENGINE Bus STOPPING
2026-03-09T20:12:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.106:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.106:8765: connect: connection refused"
2026-03-09T20:12:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get
\"http://192.168.123.106:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.106:8765: connect: connection refused" 2026-03-09T20:12:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.106:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.106:8765: connect: connection refused" 2026-03-09T20:12:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.106:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.106:8765: connect: connection refused" 2026-03-09T20:12:00.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.106:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.106:8765: connect: connection refused" 2026-03-09T20:12:00.843 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:00.648Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.106:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.106:8765: connect: connection refused" 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:00.423+0000 7f53e4bee640 -1 mgr handle_mgr_map I was active but no longer am 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ignoring --setuser ceph since I am not root 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ignoring --setgroup ceph since I am not root 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mgr[66408]: -- 192.168.123.106:0/3197809474 <== mon.2 v2:192.168.123.106:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x556054ca34a0 con 0x556054c80800 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:00.540+0000 7f0988e7b140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T20:12:00.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:00 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:00.581+0000 7f0988e7b140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:00] ENGINE HTTP Server 
cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:00] ENGINE Bus STOPPED 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:00 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:00] ENGINE Bus STARTING 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:01] ENGINE Serving on http://:::9283 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:01 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:01] ENGINE Bus STARTED 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2590577921' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24800 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: mgrmap e36: y(active, starting, since 1.00305s) 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:12:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 
20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2590577921' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24800 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: mgrmap e36: y(active, starting, since 1.00305s) 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 
ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:12:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:00 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/2590577921' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24800 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: mgrmap e36: y(active, starting, since 1.00305s) 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: 
from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:00 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:12:01.205 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:01.003+0000 7f0988e7b140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T20:12:01.461 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:01.351+0000 7f0988e7b140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T20:12:01.461 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T20:12:01.461 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
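Annotation (not part of the captured log): the "mgr fail" issued above appears to be cephadm handing the active role from mgr.x to mgr.y so that mgr.x can be redeployed on the target image during this mgr-only phase; the "missing NOTIFY_TYPES member" and NumPy sub-interpreter warnings from mgr.x look like module-load noise while it comes back as a standby, not upgrade failures. A minimal spot-check of the handover from an admin shell, assuming the client.admin keyring is available on the host, might look like:

    # hypothetical spot-check, not taken from the test itself
    ceph mgr stat        # the active mgr should flip from x to y during the failover
    ceph versions        # after the mgr phase, only the mgr entries should report the squid (19.2.3) build
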
2026-03-09T20:12:01.461 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: from numpy import show_config as show_numpy_config 2026-03-09T20:12:01.461 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:01.459+0000 7f0988e7b140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T20:12:01.713 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:01.504+0000 7f0988e7b140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T20:12:01.713 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:01 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:01.597+0000 7f0988e7b140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: Manager daemon y is now available 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: Manager daemon y is now available 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:01.985 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:01 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: Manager daemon y is now available 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:01 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:12:02.481 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.245+0000 7f0988e7b140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T20:12:02.481 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.382+0000 7f0988e7b140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:12:02.481 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.437+0000 7f0988e7b140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T20:12:02.481 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.478+0000 7f0988e7b140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T20:12:02.771 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.526+0000 7f0988e7b140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T20:12:02.771 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.567+0000 7f0988e7b140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:12:03.077 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: [09/Mar/2026:20:12:01] ENGINE Bus STARTING 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: mgrmap e37: y(active, since 2s) 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: [09/Mar/2026:20:12:01] ENGINE Serving on https://192.168.123.101:7150 
2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: [09/Mar/2026:20:12:01] ENGINE Client ('192.168.123.101', 37858) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: [09/Mar/2026:20:12:02] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: [09/Mar/2026:20:12:02] ENGINE Bus STARTED 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:02 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.769+0000 7f0988e7b140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:12:03.078 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:02 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:02.827+0000 7f0988e7b140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:12:03.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.076+0000 7f0988e7b140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: [09/Mar/2026:20:12:01] ENGINE Bus STARTING 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: mgrmap e37: y(active, since 2s) 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: [09/Mar/2026:20:12:01] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: [09/Mar/2026:20:12:01] ENGINE Client ('192.168.123.101', 37858) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:12:03.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: [09/Mar/2026:20:12:02] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: [09/Mar/2026:20:12:02] ENGINE Bus STARTED 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 
09 20:12:02 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: [09/Mar/2026:20:12:01] ENGINE Bus STARTING 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: mgrmap e37: y(active, since 2s) 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: [09/Mar/2026:20:12:01] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: [09/Mar/2026:20:12:01] ENGINE Client ('192.168.123.101', 37858) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: [09/Mar/2026:20:12:02] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: [09/Mar/2026:20:12:02] ENGINE Bus STARTED 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:02 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.390+0000 7f0988e7b140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.434+0000 7f0988e7b140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.484+0000 7f0988e7b140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.576+0000 7f0988e7b140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 
2026-03-09T20:12:03.617+0000 7f0988e7b140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:12:03.707 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.705+0000 7f0988e7b140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:12:03.997 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.829+0000 7f0988e7b140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: Standby manager daemon x started 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:04 vm06 ceph-mon[48187]: from='mgr.? 
192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:03 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:03.996+0000 7f0988e7b140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:04.033+0000 7f0988e7b140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:12:04] ENGINE Bus STARTING 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: CherryPy Checker: 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: The Application mounted at '' has an empty config. 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:12:04] ENGINE Serving on http://:::9283 2026-03-09T20:12:04.343 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: [09/Mar/2026:20:12:04] ENGINE Bus STARTED 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.399 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: Standby manager daemon x started 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.? 
192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.conf 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: Standby manager daemon x started 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:04.400 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:04 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3930785369' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:04.808 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:04 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[66404]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:04] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.51.0" 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:12:04.452+0000 7f7a57df7640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ... 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Creating ceph-iscsi config... 
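The "auth get-or-create" the mons dispatch above creates the iSCSI gateway's client key, with its mon/mgr/osd caps passed inline as alternating entity/cap pairs. For reference, the same mon command can be issued from the python3-rados binding; the entity and caps below are copied verbatim from the log, and the conffile path assumes the admin files cephadm just wrote out:

    import json
    import rados  # python3-rados

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    cmd = json.dumps({
        "prefix": "auth get-or-create",
        "entity": "client.iscsi.foo.vm01.zbymog",
        "caps": [
            "mon", 'profile rbd, allow command "osd blocklist", '
                   'allow command "config-key get" with "key" prefix "iscsi/"',
            "mgr", 'allow command "service status"',
            "osd", "allow rwx",
        ],
    })
    # mon_command takes the JSON command string plus an input buffer and
    # returns (retcode, output bytes, status string).
    ret, out, status = cluster.mon_command(cmd, b'')
    print(ret, out.decode(), status)
    cluster.shutdown()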
2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Traceback (most recent call last): 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return _run_code(code, main_globals, None, 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: exec(code, run_globals) 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module> 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Traceback (most recent call last): 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return self.wait_async( 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return self.event_loop.get_result(coro, timeout) 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return future.result(timeout) 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return self.__get_result() 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: raise self._exception 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: out, err, code = await self._run_cephadm( 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T20:12:04.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: raise OrchestratorError( 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ... 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Creating ceph-iscsi config... 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 
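Both tracebacks above bottom out in the same place: cephadm's call_throws wrapper runs "systemctl restart" for the iscsi daemon, the unit fails to start, and the non-zero exit is turned into a RuntimeError that the mgr's serve loop re-raises as OrchestratorError. A minimal sketch of that wrap-and-raise pattern, using plain subprocess (illustrative, not cephadm's actual call_wrappers code):

    import subprocess

    def call_throws(cmd):
        # Run the command and raise on non-zero exit, carrying stderr along,
        # which is the shape of the RuntimeError in the traceback above.
        proc = subprocess.run(cmd, capture_output=True, text=True)
        if proc.returncode != 0:
            raise RuntimeError('Failed command: %s: %s'
                               % (' '.join(cmd), proc.stderr.strip()))
        return proc.stdout, proc.stderr, proc.returncode

    # The call failing in this run:
    # call_throws(['systemctl', 'restart',
    #              'ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog'])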
2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Traceback (most recent call last): 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: return _run_code(code, main_globals, None, 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: exec(code, run_globals) 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module> 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:04.880 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:04 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 2026-03-09T20:12:05.079 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 systemd[1]: Stopping Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.079Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.081Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.082Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.082Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[75792]: ts=2026-03-09T20:12:05.082Z caller=main.go:1273 level=info msg="See you next time!" 
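By contrast, the prometheus.a entries that follow show a reconfigure working as intended on vm06: systemd stops the unit, podman reports the old container died and is removed, then the unit starts again and podman creates and starts a fresh container from the same image. Driven by hand, the whole cycle reduces to restarting the per-daemon unit (unit name copied from the log):

    import subprocess

    # systemd stop -> podman "container died"/"container remove" ->
    # systemd start -> podman "container create"/"init"/"start"
    unit = 'ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service'
    subprocess.run(['systemctl', 'restart', unit], check=True)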
2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77831]: 2026-03-09 20:12:05.09327544 +0000 UTC m=+0.029923128 container died 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77831]: 2026-03-09 20:12:05.111736634 +0000 UTC m=+0.048384322 container remove 25d68717ab9df67a56e5e49562f175588ef1b0042cdc3a6c103de756e4f84562 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 bash[77831]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service: Deactivated successfully. 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 systemd[1]: Stopped Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:12:05.333 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 systemd[1]: Starting Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:12:05.334 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77901]: 2026-03-09 20:12:05.282171976 +0000 UTC m=+0.021284098 container create 865f54718b3f35819ce1b37f32bfc134da3a65e2fc073231eb2fb0565980a45f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T20:12:05.334 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77901]: 2026-03-09 20:12:05.323937428 +0000 UTC m=+0.063049570 container init 865f54718b3f35819ce1b37f32bfc134da3a65e2fc073231eb2fb0565980a45f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T20:12:05.334 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77901]: 2026-03-09 20:12:05.32717658 +0000 UTC m=+0.066288702 container start 865f54718b3f35819ce1b37f32bfc134da3a65e2fc073231eb2fb0565980a45f (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T20:12:05.334 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 bash[77901]: 865f54718b3f35819ce1b37f32bfc134da3a65e2fc073231eb2fb0565980a45f 2026-03-09T20:12:05.334 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 podman[77901]: 2026-03-09 20:12:05.273780791 +0000 UTC m=+0.012892922 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T20:12:05.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T20:12:05.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring 2026-03-09T20:12:05.337 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 
ceph-mon[48187]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: mgrmap e38: y(active, since 4s), standbys: x 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ... 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Creating ceph-iscsi config... 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Traceback (most recent call last): 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: exec(code, run_globals) 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module> 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code. 2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details. 
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: return self.wait_async(
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-09T20:12:05.338 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: return future.result(timeout)
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: return self.__get_result()
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: raise self._exception
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: out, err, code = await self._run_cephadm(
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: raise OrchestratorError(
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: Creating ceph-iscsi config...
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
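The second traceback is the mgr/cephadm module's view of the same event: _check_daemons scheduled the reconfig, and the remote failure surfaces as an OrchestratorError. The same information can be pulled from the cluster side without reading mon journals; a sketch using standard orchestrator and log commands:

  # Orchestrator's view of the iscsi daemons (forces a fresh inventory pass)
  ceph orch ps --daemon-type iscsi --refresh
  # Recent cephadm cluster-log events, where this OrchestratorError is recorded
  ceph log last 100 info cephadm
  # Any resulting health warnings (e.g. CEPHADM_FAILED_DAEMON)
  ceph health detail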
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: return _run_code(code, main_globals, None,
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: exec(code, run_globals)
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:12:05.339 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:05 vm06 ceph-mon[48187]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE Bus STOPPING
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
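The blocks that follow from mon.a and mon.c repeat the identical cluster-log lines already seen via mon.b: the failure happened once, but cluster log messages fan out to every mon's journal. When scanning an archived run, restricting to a single mon's journal avoids triple-counting. A sketch; the archive filename here is hypothetical:

  # Count distinct restart failures via one mon's journal only (filename assumed)
  grep 'journalctl@ceph.mon.b' teuthology.log | grep -c 'Non-zero exit code 1 from systemctl restart'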
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: mgrmap e38: y(active, since 4s), standbys: x
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: Creating ceph-iscsi config...
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.557 INFO:journalctl@ceph.mon.a.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return _run_code(code, main_globals, None,
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: exec(code, run_globals)
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return self.wait_async(
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return future.result(timeout)
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return self.__get_result()
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: raise self._exception
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: out, err, code = await self._run_cephadm(
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: raise OrchestratorError(
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Creating ceph-iscsi config...
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: return _run_code(code, main_globals, None,
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: exec(code, run_globals)
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:12:05.558 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[49698]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Updating vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Updating vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/config/ceph.client.admin.keyring
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)...
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: mgrmap e38: y(active, since 4s), standbys: x
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Creating ceph-iscsi config...
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: return _run_code(code, main_globals, None,
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: exec(code, run_globals)
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: return self.wait_async(
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: return future.result(timeout)
2026-03-09T20:12:05.559 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: return self.__get_result()
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: raise self._exception
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: out, err, code = await self._run_cephadm(
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: raise OrchestratorError(
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: Creating ceph-iscsi config...
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: Non-zero exit code 1 from systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: Traceback (most recent call last):
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: return _run_code(code, main_globals, None,
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: exec(code, run_globals)
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: RuntimeError: Failed command: systemctl restart ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout: See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-09T20:12:05.560 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:05 vm01 ceph-mon[53908]: Reconfiguring daemon prometheus.a on vm06
2026-03-09T20:12:05.592 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 systemd[1]: Started Ceph prometheus.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
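systemd reports prometheus.a started, and the daemon's own startup banner follows, including its listen address. A quick liveness sketch using Prometheus's standard readiness/health endpoints; host and port are taken from the log lines below:

  # Prometheus web endpoints (address :9095 on vm06, per the log)
  curl -fsS http://vm06.local:9095/-/ready
  curl -fsS http://vm06.local:9095/-/healthy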
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.355Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.356Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.356Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm06 (none))"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.356Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.356Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.362Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.363Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.364Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.364Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=270.145µs
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.364Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.368Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.368Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.372Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.384Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.394Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.396Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.398Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.398Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=5 maxSegment=5
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.398Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=21.15µs wal_replay_duration=33.391229ms wbl_replay_duration=330ns total_replay_duration=33.778883ms
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.400Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.400Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.401Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.417Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=16.406027ms db_storage=792ns remote_storage=1.242µs web_handler=451ns query_engine=752ns scrape=528.63µs scrape_sd=89.467µs notify=6.723µs notify_sd=5.501µs rules=15.471667ms tracing=5.13µs
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.417Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-09T20:12:05.593 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:12:05 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:12:05.418Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-09T20:12:05.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-09T20:12:05.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE Bus STOPPED
2026-03-09T20:12:05.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE Bus STARTING
2026-03-09T20:12:05.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE Serving on http://:::9283
2026-03-09T20:12:05.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:05 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:05] ENGINE Bus STARTED
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.521 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:06 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: Stopping Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:12:06.522 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 podman[78165]: 2026-03-09 20:12:06.484496609 +0000 UTC m=+0.113963235 container died 0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-09T20:12:06.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T20:12:06.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:06 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 podman[78165]: 2026-03-09 20:12:06.520391694 +0000 UTC m=+0.149858320 container remove 0b7c9ce0e14587682170d50ecc0047870d25cd4244b88915a2e929f67c9e00b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team )
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 bash[78165]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Main process exited, code=exited, status=143/n/a
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Failed with result 'exit-code'.
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: Stopped Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Consumed 20.897s CPU time.
2026-03-09T20:12:06.838 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 systemd[1]: Starting Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:06 vm06 podman[78284]: 2026-03-09 20:12:06.986061376 +0000 UTC m=+0.024339387 container create 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS)
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 podman[78284]: 2026-03-09 20:12:07.031994839 +0000 UTC m=+0.070272840 container init 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 podman[78284]: 2026-03-09 20:12:07.03550532 +0000 UTC m=+0.073783331 container start 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 bash[78284]: 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 podman[78284]: 2026-03-09 20:12:06.976291249 +0000 UTC m=+0.014569260 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T20:12:07.093 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 systemd[1]: Started Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:12:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:06.982Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:12:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:06.982Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:12:07.392 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:07.188+0000 7fa5b90f3140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-09T20:12:07.393 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:07.236+0000 7fa5b90f3140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-09T20:12:07.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:07 vm06 ceph-mon[48187]: Upgrade: Updating mgr.x
2026-03-09T20:12:07.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:07 vm06 ceph-mon[48187]: Deploying daemon mgr.x on vm06
2026-03-09T20:12:07.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:07 vm06 ceph-mon[48187]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:12:07.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:07 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:07.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:07 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[49698]: Upgrade: Updating mgr.x
2026-03-09T20:12:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[49698]: Deploying daemon mgr.x on vm06
2026-03-09T20:12:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[49698]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[53908]: Upgrade: Updating mgr.x
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[53908]: Deploying daemon mgr.x on vm06
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[53908]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y'
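The alertmanager error above is independent of the iscsi failure: its dashboard webhook target, host.containers.internal, does not resolve via this VM's resolver (192.168.123.1), so notifications keep retrying. Two quick checks, sketched with commands that exist in this log's own workflow; the getent lookup is a generic resolver probe:

  # Does the webhook host resolve at all on the alertmanager node (vm01)?
  getent hosts host.containers.internal
  # What the dashboard currently has configured (same mechanism as the
  # dashboard set-prometheus-api-host call seen earlier in this log)
  ceph dashboard get-prometheus-api-host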
2026-03-09T20:12:07.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:07 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y'
2026-03-09T20:12:08.037 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:07 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:07.763+0000 7fa5b90f3140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-09T20:12:08.298 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:08.243+0000 7fa5b90f3140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: from numpy import show_config as show_numpy_config
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:08.374+0000 7fa5b90f3140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:08.419+0000 7fa5b90f3140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-09T20:12:08.582 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:08 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:08.514+0000 7fa5b90f3140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-09T20:12:09.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.184+0000 7fa5b90f3140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-09T20:12:09.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.311+0000 7fa5b90f3140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-09T20:12:09.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.367+0000 7fa5b90f3140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-09T20:12:09.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.406+0000 7fa5b90f3140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-09T20:12:09.466 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.464+0000 7fa5b90f3140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
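The NOTIFY_TYPES warnings are emitted while the freshly redeployed mgr.x loads its Python modules during the staggered upgrade ("Upgrade: Updating mgr.x" above); mixed daemon versions are expected at this stage. Progress can be confirmed with standard commands:

  # Staggered upgrade progress and per-daemon version spread
  ceph orch upgrade status
  ceph versions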
2026-03-09T20:12:09.727 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.506+0000 7fa5b90f3140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:12:09.728 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:09 vm06 ceph-mon[48187]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:12:09.728 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:09 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.728 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:09 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.728 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:09 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.728 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:09 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[49698]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:12:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[53908]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:12:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:09.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:09 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:10.067 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.750+0000 7fa5b90f3140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:12:10.067 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:09 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:09.821+0000 7fa5b90f3140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:12:10.067 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.065+0000 7fa5b90f3140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.386+0000 7fa5b90f3140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.430+0000 7fa5b90f3140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.475+0000 7fa5b90f3140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.574+0000 7fa5b90f3140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.614+0000 7fa5b90f3140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:12:10.704 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.701+0000 7fa5b90f3140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:12:11.009 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:10 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:10.853+0000 7fa5b90f3140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:11.008+0000 7fa5b90f3140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:11.050+0000 7fa5b90f3140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:12:11] ENGINE Bus STARTING 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: CherryPy Checker: 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: The Application mounted at '' has an empty config. 
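[editor's note] The CherryPy "ENGINE Serving on http://:::9283" record is the prometheus mgr module's exporter coming up inside the restarted mgr.x. A quick probe, assuming shell access to a machine that can reach vm06 (host and port are the ones in the log; standby mgrs typically answer with an empty payload, which is why scrape configs usually list every mgr host):

    # Probe the exporter CherryPy just started on mgr.x (vm06:9283).
    curl -fsS http://vm06:9283/metrics | head -n 20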
2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:12:11] ENGINE Serving on http://:::9283 2026-03-09T20:12:11.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:12:11 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:12:11] ENGINE Bus STARTED 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:11.703 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: Standby manager daemon x restarted 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: Standby manager daemon x started 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.? 
192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: Standby manager daemon x restarted 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: Standby manager daemon x started 2026-03-09T20:12:11.704 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:11.704 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:11 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:12:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", 
"allow rwx"]}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: Standby manager daemon x restarted 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: Standby manager daemon x started 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.? 192.168.123.106:0/3519033574' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:11.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:11 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T20:12:12.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all mgr 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 
ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: mgrmap e39: y(active, since 12s), standbys: x 2026-03-09T20:12:12.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:12 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/4231053968' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:12:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:12] "GET /metrics HTTP/1.1" 200 34541 "" "Prometheus/2.51.0" 2026-03-09T20:12:12.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T20:12:12.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all mgr 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:12.879 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: mgrmap e39: y(active, since 12s), standbys: x 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/4231053968' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Reconfiguring iscsi.foo.vm01.zbymog (dependencies changed)... 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Reconfiguring daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all mgr 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T20:12:12.880 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 
ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: mgrmap e39: y(active, since 12s), standbys: x 2026-03-09T20:12:12.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:12 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/4231053968' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:12:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:13 vm06 ceph-mon[48187]: Upgrade: Updating grafana.a 2026-03-09T20:12:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:13 vm06 ceph-mon[48187]: Deploying daemon grafana.a on vm06 2026-03-09T20:12:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:13 vm06 ceph-mon[48187]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:12:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[49698]: Upgrade: Updating grafana.a 2026-03-09T20:12:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[49698]: Deploying daemon grafana.a on vm06 2026-03-09T20:12:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[49698]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:12:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[53908]: Upgrade: Updating grafana.a 2026-03-09T20:12:13.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[53908]: Deploying daemon grafana.a on vm06 2026-03-09T20:12:13.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:13 vm01 ceph-mon[53908]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:12:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:15 vm06 ceph-mon[48187]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:15 vm01 ceph-mon[49698]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:15.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:15 vm01 ceph-mon[53908]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB 
used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:16 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:16 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:16 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:16.983Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:16.984Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:17 vm01 ceph-mon[49698]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:17 vm01 ceph-mon[53908]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:18.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:17 vm06 ceph-mon[48187]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 systemd[1]: Stopping Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
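[editor's note] The alertmanager "Notify for alerts failed ... lookup host.containers.internal ... no such host" errors a few records above mean the dashboard webhook receiver points at podman's host alias, which this VM's DNS (192.168.123.1:53) cannot resolve. A diagnostic sketch, assuming a shell on vm01; the alertmanager.yml path follows the usual cephadm daemon layout for this run's fsid:

    # Does the webhook target resolve at all? (The log says it does not.)
    getent hosts host.containers.internal || echo 'unresolvable, matching the notify failures'

    # Inspect the generated receiver URL that cephadm handed to alertmanager:
    cat /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/alertmanager.a/etc/alertmanager/alertmanager.yml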
2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[69933]: t=2026-03-09T20:12:18+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 podman[79498]: 2026-03-09 20:12:18.706420269 +0000 UTC m=+0.017041979 container died 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, description=Ceph Grafana Container, io.openshift.expose-services=, distribution-scope=public, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-type=git, io.buildah.version=1.24.2, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, maintainer=Paul Cuzner , io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, name=ubi8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, version=8.5, build-date=2022-03-28T10:36:18.413762) 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 podman[79498]: 2026-03-09 20:12:18.728944698 +0000 UTC m=+0.039566408 container remove 694e845e2157592bbb2e6c5b5353b6249a7296fd6373c484e293e620a6070068 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, io.k8s.display-name=Red Hat Universal Base Image 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=ubi8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, build-date=2022-03-28T10:36:18.413762, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, architecture=x86_64, maintainer=Paul Cuzner , release=236.1648460182, io.openshift.tags=base rhel8, summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8.5, description=Ceph Grafana Container, io.openshift.expose-services=, com.redhat.component=ubi8-container, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.buildah.version=1.24.2) 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 bash[79498]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Deactivated successfully. 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 systemd[1]: Stopped Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:12:18.879 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Consumed 2.278s CPU time. 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:18 vm06 systemd[1]: Starting Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 podman[79609]: 2026-03-09 20:12:19.129985634 +0000 UTC m=+0.040603960 container create d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 (image=quay.io/ceph/grafana:10.4.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, maintainer=Grafana Labs ) 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 podman[79609]: 2026-03-09 20:12:19.175372474 +0000 UTC m=+0.085990810 container init d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 (image=quay.io/ceph/grafana:10.4.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, maintainer=Grafana Labs ) 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 podman[79609]: 2026-03-09 20:12:19.179563779 +0000 UTC m=+0.090182105 container start d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 (image=quay.io/ceph/grafana:10.4.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, maintainer=Grafana Labs ) 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 bash[79609]: d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 podman[79609]: 2026-03-09 20:12:19.105704066 +0000 UTC m=+0.016322402 image pull c8b91775d855b99270fc5d22f3c6737e8cca01ef4c25c8b0362295e0746fa39b quay.io/ceph/grafana:10.4.0 2026-03-09T20:12:19.312 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 systemd[1]: Started Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
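[editor's note] The stop/remove/create/start sequence above is cephadm swapping grafana.a's container image (quay.io/ceph/ceph-grafana:8.3.5 replaced by quay.io/ceph/grafana:10.4.0) as the staggered upgrade reaches the monitoring stack; the earlier "Upgrade: Setting container_image for all <type>" records are the same pass walking every daemon type. The operator-side commands driving this phase look roughly like the following sketch (the flags are the documented staggered-upgrade options; the target tag is a placeholder, not this run's image):

    # Staggered upgrade: limit the first pass to one daemon type, then watch it spread.
    ceph orch upgrade start --image quay.io/ceph/ceph:<target> --daemon-types mgr
    ceph orch upgrade status                    # progress, including monitoring daemons
    ceph orch ps --daemon-type grafana          # confirm grafana.a came back on the new image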
2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312271477Z level=info msg="Starting Grafana" version=10.4.0 commit=03f502a94d17f7dc4e6c34acdf8428aedd986e4c branch=HEAD compiled=2026-03-09T20:12:19Z 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312653142Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312657619Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312660144Z level=info msg="Config overridden from command line" arg="default.paths.data=/var/lib/grafana" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312662018Z level=info msg="Config overridden from command line" arg="default.paths.logs=/var/log/grafana" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312664383Z level=info msg="Config overridden from command line" arg="default.paths.plugins=/var/lib/grafana/plugins" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312667729Z level=info msg="Config overridden from command line" arg="default.paths.provisioning=/etc/grafana/provisioning" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.31267368Z level=info msg="Config overridden from command line" arg="default.log.mode=console" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312676806Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312678799Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312680473Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312682165Z level=info msg="Config overridden from Environment variable" 
var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312683869Z level=info msg=Target target=[all] 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312688437Z level=info msg="Path Home" path=/usr/share/grafana 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312690301Z level=info msg="Path Data" path=/var/lib/grafana 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312692084Z level=info msg="Path Logs" path=/var/log/grafana 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312693637Z level=info msg="Path Plugins" path=/var/lib/grafana/plugins 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.31269527Z level=info msg="Path Provisioning" path=/etc/grafana/provisioning 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=settings t=2026-03-09T20:12:19.312696953Z level=info msg="App mode production" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore t=2026-03-09T20:12:19.312836465Z level=info msg="Connecting to DB" dbtype=sqlite3 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore t=2026-03-09T20:12:19.313227737Z level=warn msg="SQLite database file has broader permissions than it should" path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.313986146Z level=info msg="Starting DB migrations" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.319180098Z level=info msg="Executing migration" id="Update is_service_account column to nullable" 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.344671771Z level=info msg="Migration successfully executed" id="Update is_service_account column to nullable" duration=25.487525ms 2026-03-09T20:12:19.565 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.345673755Z level=info msg="Executing migration" id="Add uid column to user" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.34834738Z level=info msg="Migration successfully executed" id="Add uid column to user" duration=2.672703ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.349025319Z level=info msg="Executing migration" id="Update uid column values for users" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.349177424Z level=info msg="Migration successfully executed" id="Update uid column values for users" duration=152.254µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.349742611Z level=info msg="Executing migration" id="Add unique index user_uid" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.350311026Z level=info msg="Migration successfully executed" id="Add unique index user_uid" duration=568.204µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.351144406Z level=info msg="Executing migration" id="Add isPublic for dashboard" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.353801458Z level=info msg="Migration successfully executed" id="Add isPublic for dashboard" duration=2.678943ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.354506438Z level=info msg="Executing migration" id="set service account foreign key to nil if 0" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.354600955Z level=info msg="Migration successfully executed" id="set service account foreign key to nil if 0" duration=94.146µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.355194306Z level=info msg="Executing migration" id="Add last_used_at to api_key table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.358170907Z level=info msg="Migration successfully executed" id="Add last_used_at to api_key table" duration=2.975289ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.359105106Z level=info msg="Executing migration" id="Add is_revoked column to api_key table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator 
t=2026-03-09T20:12:19.362856707Z level=info msg="Migration successfully executed" id="Add is_revoked column to api_key table" duration=3.750188ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.363898537Z level=info msg="Executing migration" id="Add playlist column created_at" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.366583092Z level=info msg="Migration successfully executed" id="Add playlist column created_at" duration=2.671951ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.367201449Z level=info msg="Executing migration" id="Add playlist column updated_at" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.370543605Z level=info msg="Migration successfully executed" id="Add playlist column updated_at" duration=3.340663ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.371273021Z level=info msg="Executing migration" id="Add column preferences.json_data" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.374391066Z level=info msg="Migration successfully executed" id="Add column preferences.json_data" duration=3.116242ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.375590622Z level=info msg="Executing migration" id="alter preferences.json_data to mediumtext v1" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.375621649Z level=info msg="Migration successfully executed" id="alter preferences.json_data to mediumtext v1" duration=30.457µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.376244655Z level=info msg="Executing migration" id="Add preferences index org_id" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.377364692Z level=info msg="Migration successfully executed" id="Add preferences index org_id" duration=1.119927ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.378108623Z level=info msg="Executing migration" id="Add preferences index user_id" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.378616254Z level=info msg="Migration successfully executed" id="Add 
preferences index user_id" duration=507.931µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.379320352Z level=info msg="Executing migration" id="Increase tags column to length 4096" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.379346882Z level=info msg="Migration successfully executed" id="Increase tags column to length 4096" duration=26.601µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.379941385Z level=info msg="Executing migration" id="Add column uid in team" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.382346516Z level=info msg="Migration successfully executed" id="Add column uid in team" duration=2.371298ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.3829687Z level=info msg="Executing migration" id="Update uid column values in team" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.383062656Z level=info msg="Migration successfully executed" id="Update uid column values in team" duration=94.185µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.383717021Z level=info msg="Executing migration" id="Add unique index team_org_id_uid" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.384363241Z level=info msg="Migration successfully executed" id="Add unique index team_org_id_uid" duration=646.26µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.38506816Z level=info msg="Executing migration" id="Add OAuth ID token to user_auth" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.387909648Z level=info msg="Migration successfully executed" id="Add OAuth ID token to user_auth" duration=2.838533ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.388492771Z level=info msg="Executing migration" id="add index user_auth_token.revoked_at" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.388922344Z level=info msg="Migration successfully executed" id="add index user_auth_token.revoked_at" duration=429.123µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 
09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.390173356Z level=info msg="Executing migration" id="alter table short_url alter column created_by type to bigint" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.390201197Z level=info msg="Migration successfully executed" id="alter table short_url alter column created_by type to bigint" duration=28.484µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.390823783Z level=info msg="Executing migration" id="add current_reason column related to current_state" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.393184752Z level=info msg="Migration successfully executed" id="add current_reason column related to current_state" duration=2.359436ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.393822566Z level=info msg="Executing migration" id="add result_fingerprint column to alert_instance" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.396113314Z level=info msg="Migration successfully executed" id="add result_fingerprint column to alert_instance" duration=2.289345ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.39676859Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.398957738Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule" duration=2.184568ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.399821234Z level=info msg="Executing migration" id="add is_paused column to alert_rule table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.402190268Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule table" duration=2.367822ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.402870301Z level=info msg="Executing migration" id="fix is_paused column for alert_rule table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.40289663Z level=info msg="Migration successfully executed" id="fix is_paused column for alert_rule table" duration=27.05µs 
2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.404397989Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule_version" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.407284803Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule_version" duration=2.885661ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.408870842Z level=info msg="Executing migration" id="add is_paused column to alert_rule_versions table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.41137577Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule_versions table" duration=2.504006ms 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.412447145Z level=info msg="Executing migration" id="fix is_paused column for alert_rule_version table" 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.412475588Z level=info msg="Migration successfully executed" id="fix is_paused column for alert_rule_version table" duration=29.025µs 2026-03-09T20:12:19.566 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.413050525Z level=info msg="Executing migration" id="add configuration_hash column to alert_configuration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.41538821Z level=info msg="Migration successfully executed" id="add configuration_hash column to alert_configuration" duration=2.335902ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.416003402Z level=info msg="Executing migration" id="add column send_alerts_to in ngalert_configuration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.418405487Z level=info msg="Migration successfully executed" id="add column send_alerts_to in ngalert_configuration" duration=2.400143ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.419046066Z level=info msg="Executing migration" id="create provenance_type table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.419562072Z level=info msg="Migration successfully 
executed" id="create provenance_type table" duration=516.627µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.420374272Z level=info msg="Executing migration" id="add index to uniquify (record_key, record_type, org_id) columns" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.420961281Z level=info msg="Migration successfully executed" id="add index to uniquify (record_key, record_type, org_id) columns" duration=587.198µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.422459806Z level=info msg="Executing migration" id="create alert_image table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.430254155Z level=info msg="Migration successfully executed" id="create alert_image table" duration=7.78956ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.431404137Z level=info msg="Executing migration" id="add unique index on token to alert_image table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.432093838Z level=info msg="Migration successfully executed" id="add unique index on token to alert_image table" duration=693.406µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.432863127Z level=info msg="Executing migration" id="support longer URLs in alert_image table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.43290709Z level=info msg="Migration successfully executed" id="support longer URLs in alert_image table" duration=44.363µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.43425365Z level=info msg="Executing migration" id=create_alert_configuration_history_table 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.434806555Z level=info msg="Migration successfully executed" id=create_alert_configuration_history_table duration=552.936µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.437690113Z level=info msg="Executing migration" id="drop non-unique orgID index on alert_configuration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.439265902Z level=info msg="Migration 
successfully executed" id="drop non-unique orgID index on alert_configuration" duration=1.578042ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.440097057Z level=info msg="Executing migration" id="drop unique orgID index on alert_configuration if exists" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.440268699Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop unique orgID index on alert_configuration if exists" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.441441224Z level=info msg="Executing migration" id="extract alertmanager configuration history to separate table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.441870457Z level=info msg="Migration successfully executed" id="extract alertmanager configuration history to separate table" duration=429.514µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.443489187Z level=info msg="Executing migration" id="add unique index on orgID to alert_configuration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.444022044Z level=info msg="Migration successfully executed" id="add unique index on orgID to alert_configuration" duration=533.137µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.444585649Z level=info msg="Executing migration" id="add last_applied column to alert_configuration_history" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.446878731Z level=info msg="Migration successfully executed" id="add last_applied column to alert_configuration_history" duration=2.291159ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.447490055Z level=info msg="Executing migration" id="increase max description length to 2048" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.447504021Z level=info msg="Migration successfully executed" id="increase max description length to 2048" duration=13.826µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.448082805Z level=info msg="Executing migration" id="alter library_element model to mediumtext" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.44811279Z level=info msg="Migration successfully executed" id="alter library_element model to mediumtext" duration=30.317µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.448732401Z level=info msg="Executing migration" id="create secrets table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.449189686Z level=info msg="Migration successfully executed" id="create secrets table" duration=457.045µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.450971491Z level=info msg="Executing migration" id="rename data_keys name column to id" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.466569495Z level=info msg="Migration successfully executed" id="rename data_keys name column to id" duration=15.591843ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.468945052Z level=info msg="Executing migration" id="add name column into data_keys" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.472642112Z level=info msg="Migration successfully executed" id="add name column into data_keys" duration=3.694516ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.473369353Z level=info msg="Executing migration" id="copy data_keys id column values into name" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.473461105Z level=info msg="Migration successfully executed" id="copy data_keys id column values into name" duration=92.193µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.474070135Z level=info msg="Executing migration" id="rename data_keys name column to label" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.488824581Z level=info msg="Migration successfully executed" id="rename data_keys name column to label" duration=14.749996ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.490287599Z level=info msg="Executing migration" id="rename data_keys id column back to name" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator 
t=2026-03-09T20:12:19.50418465Z level=info msg="Migration successfully executed" id="rename data_keys id column back to name" duration=13.895459ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.505307732Z level=info msg="Executing migration" id="add column hidden to role table" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.50902558Z level=info msg="Migration successfully executed" id="add column hidden to role table" duration=3.713601ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.51011531Z level=info msg="Executing migration" id="permission kind migration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.5166016Z level=info msg="Migration successfully executed" id="permission kind migration" duration=6.484486ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.517442674Z level=info msg="Executing migration" id="permission attribute migration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.522797147Z level=info msg="Migration successfully executed" id="permission attribute migration" duration=5.350635ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.524552061Z level=info msg="Executing migration" id="permission identifier migration" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.52793349Z level=info msg="Migration successfully executed" id="permission identifier migration" duration=3.379315ms 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.530815996Z level=info msg="Executing migration" id="add permission identifier index" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.531715569Z level=info msg="Migration successfully executed" id="add permission identifier index" duration=901.087µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.532530695Z level=info msg="Executing migration" id="add permission action scope role_id index" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.532952344Z level=info msg="Migration successfully executed" id="add permission action 
scope role_id index" duration=421.589µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.533656893Z level=info msg="Executing migration" id="remove permission role_id action scope index" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.534139325Z level=info msg="Migration successfully executed" id="remove permission role_id action scope index" duration=482.242µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.53465459Z level=info msg="Executing migration" id="create query_history table v1" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.535084785Z level=info msg="Migration successfully executed" id="create query_history table v1" duration=430.185µs 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.536332541Z level=info msg="Executing migration" id="add index query_history.org_id-created_by-datasource_uid" 2026-03-09T20:12:19.567 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.536753398Z level=info msg="Migration successfully executed" id="add index query_history.org_id-created_by-datasource_uid" duration=420.656µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.53743759Z level=info msg="Executing migration" id="alter table query_history alter column created_by type to bigint" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.537462416Z level=info msg="Migration successfully executed" id="alter table query_history alter column created_by type to bigint" duration=25.398µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.53805775Z level=info msg="Executing migration" id="rbac disabled migrator" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.538076365Z level=info msg="Migration successfully executed" id="rbac disabled migrator" duration=18.535µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.53881678Z level=info msg="Executing migration" id="teams permissions migration" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.539082688Z level=info msg="Migration successfully executed" id="teams permissions 
migration" duration=265.777µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.540736594Z level=info msg="Executing migration" id="dashboard permissions" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.542720858Z level=info msg="Migration successfully executed" id="dashboard permissions" duration=1.984664ms 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.543495117Z level=info msg="Executing migration" id="dashboard permissions uid scopes" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.544895388Z level=info msg="Migration successfully executed" id="dashboard permissions uid scopes" duration=1.40028ms 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.545478149Z level=info msg="Executing migration" id="drop managed folder create actions" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.54558587Z level=info msg="Migration successfully executed" id="drop managed folder create actions" duration=107.721µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.547169604Z level=info msg="Executing migration" id="alerting notification permissions" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.547419803Z level=info msg="Migration successfully executed" id="alerting notification permissions" duration=250.089µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.548072835Z level=info msg="Executing migration" id="create query_history_star table v1" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.548566299Z level=info msg="Migration successfully executed" id="create query_history_star table v1" duration=493.274µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.549577592Z level=info msg="Executing migration" id="add index query_history.user_id-query_uid" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.550131999Z level=info msg="Migration successfully executed" id="add index query_history.user_id-query_uid" duration=542.976µs 2026-03-09T20:12:19.568 
INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.550820187Z level=info msg="Executing migration" id="add column org_id in query_history_star" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.554119433Z level=info msg="Migration successfully executed" id="add column org_id in query_history_star" duration=3.299045ms 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.555059281Z level=info msg="Executing migration" id="alter table query_history_star_mig column user_id type to bigint" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.555086964Z level=info msg="Migration successfully executed" id="alter table query_history_star_mig column user_id type to bigint" duration=28.244µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.555686606Z level=info msg="Executing migration" id="create correlation table v1" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.556315162Z level=info msg="Migration successfully executed" id="create correlation table v1" duration=628.235µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.557026734Z level=info msg="Executing migration" id="add index correlations.uid" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.557515618Z level=info msg="Migration successfully executed" id="add index correlations.uid" duration=489.075µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.558206462Z level=info msg="Executing migration" id="add index correlations.source_uid" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.558657196Z level=info msg="Migration successfully executed" id="add index correlations.source_uid" duration=450.604µs 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.559379958Z level=info msg="Executing migration" id="add correlation config column" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.561941202Z level=info msg="Migration successfully executed" id="add correlation config column" duration=2.560914ms 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 
20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.562553187Z level=info msg="Executing migration" id="drop index IDX_correlation_uid - v1" 2026-03-09T20:12:19.568 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.563213724Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_uid - v1" duration=660.266µs 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.815 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:19 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.566625149Z level=info msg="Executing migration" id="drop index IDX_correlation_source_uid - v1" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.567031861Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_source_uid - v1" duration=406.662µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.567589144Z level=info msg="Executing migration" id="Rename table correlation to correlation_tmp_qwerty - v1" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.579081675Z level=info msg="Migration successfully executed" id="Rename table correlation to correlation_tmp_qwerty - v1" duration=11.489897ms 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.579950389Z level=info msg="Executing migration" id="create correlation v2" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.580690686Z level=info msg="Migration successfully executed" id="create correlation v2" duration=740.556µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.583323554Z level=info msg="Executing migration" id="create index 
IDX_correlation_uid - v2" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.584002544Z level=info msg="Migration successfully executed" id="create index IDX_correlation_uid - v2" duration=679.291µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.585345539Z level=info msg="Executing migration" id="create index IDX_correlation_source_uid - v2" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.585867284Z level=info msg="Migration successfully executed" id="create index IDX_correlation_source_uid - v2" duration=520.364µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.586614603Z level=info msg="Executing migration" id="create index IDX_correlation_org_id - v2" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.587099442Z level=info msg="Migration successfully executed" id="create index IDX_correlation_org_id - v2" duration=484.848µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.588211733Z level=info msg="Executing migration" id="copy correlation v1 to v2" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.588342809Z level=info msg="Migration successfully executed" id="copy correlation v1 to v2" duration=131.416µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.589307474Z level=info msg="Executing migration" id="drop correlation_tmp_qwerty" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.589768296Z level=info msg="Migration successfully executed" id="drop correlation_tmp_qwerty" duration=460.362µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.590795869Z level=info msg="Executing migration" id="add provisioning column" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.594946317Z level=info msg="Migration successfully executed" id="add provisioning column" duration=4.147111ms 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.59626192Z level=info msg="Executing migration" id="create entity_events table" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 
20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.596805477Z level=info msg="Migration successfully executed" id="create entity_events table" duration=543.939µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.598095161Z level=info msg="Executing migration" id="create dashboard public config v1" 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.598861216Z level=info msg="Migration successfully executed" id="create dashboard public config v1" duration=766.314µs 2026-03-09T20:12:19.819 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.599832924Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.600289037Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.600975041Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.602610824Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.605781398Z level=info msg="Executing migration" id="Drop old dashboard public config table" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.606524519Z level=info msg="Migration successfully executed" id="Drop old dashboard public config table" duration=739.796µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.607362527Z level=info msg="Executing migration" id="recreate dashboard public config v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.608080551Z level=info msg="Migration successfully executed" id="recreate dashboard public config v1" duration=718.165µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.608871592Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v1" 
2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.609441899Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v1" duration=567.613µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.610241155Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.610843272Z level=info msg="Migration successfully executed" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v1" duration=601.896µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.611632069Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.612152653Z level=info msg="Migration successfully executed" id="drop index UQE_dashboard_public_config_uid - v2" duration=520.574µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.612691081Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.613168444Z level=info msg="Migration successfully executed" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=477.564µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.613676866Z level=info msg="Executing migration" id="Drop public config table" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.614161402Z level=info msg="Migration successfully executed" id="Drop public config table" duration=484.437µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.615424697Z level=info msg="Executing migration" id="Recreate dashboard public config v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.617327619Z level=info msg="Migration successfully executed" id="Recreate dashboard public config v2" duration=1.900647ms 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.618358909Z 
level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.618841703Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v2" duration=487.313µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.619364271Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.619918328Z level=info msg="Migration successfully executed" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=553.847µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.620719247Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_access_token - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.62120719Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_access_token - v2" duration=486.33µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.62193341Z level=info msg="Executing migration" id="Rename table dashboard_public_config to dashboard_public - v2" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.630277627Z level=info msg="Migration successfully executed" id="Rename table dashboard_public_config to dashboard_public - v2" duration=8.337996ms 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.631266849Z level=info msg="Executing migration" id="add annotations_enabled column" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.635876076Z level=info msg="Migration successfully executed" id="add annotations_enabled column" duration=4.605359ms 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.636902757Z level=info msg="Executing migration" id="add time_selection_enabled column" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.639953868Z level=info msg="Migration successfully executed" id="add time_selection_enabled column" duration=3.048927ms 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.640954261Z level=info msg="Executing migration" id="delete orphaned public dashboards" 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.641229897Z level=info msg="Migration successfully executed" id="delete orphaned public dashboards" duration=277.02µs 2026-03-09T20:12:19.820 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.641912464Z level=info msg="Executing migration" id="add share column" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.645841929Z level=info msg="Migration successfully executed" id="add share column" duration=3.925958ms 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.646927982Z level=info msg="Executing migration" id="backfill empty share column fields with default of public" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.647219437Z level=info msg="Migration successfully executed" id="backfill empty share column fields with default of public" duration=291.806µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.647942751Z level=info msg="Executing migration" id="create file table" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.648460179Z level=info msg="Migration successfully executed" id="create file table" duration=517.438µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.649239007Z level=info msg="Executing migration" id="file table idx: path natural pk" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.649763178Z level=info msg="Migration successfully executed" id="file table idx: path natural pk" duration=524.152µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.650462277Z level=info msg="Executing migration" id="file table idx: parent_folder_path_hash fast folder retrieval" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.651056438Z level=info msg="Migration successfully executed" id="file table idx: parent_folder_path_hash fast folder retrieval" duration=594.873µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.651825398Z level=info msg="Executing migration" id="create file_meta table" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.652361572Z level=info msg="Migration successfully executed" id="create file_meta table" duration=536.575µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.653160787Z level=info msg="Executing migration" id="file table idx: path key" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.653672335Z level=info msg="Migration successfully executed" id="file table idx: path key" duration=512.039µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.654392984Z level=info msg="Executing migration" id="set path collation in file table" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.654488102Z level=info msg="Migration successfully executed" id="set path collation in file table" duration=95.438µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.655145502Z level=info msg="Executing migration" id="migrate contents column to mediumblob for MySQL" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.655237845Z level=info msg="Migration successfully executed" id="migrate contents column to mediumblob for MySQL" duration=92.834µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.655928217Z level=info msg="Executing migration" id="managed permissions migration" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.657775495Z level=info msg="Migration successfully executed" id="managed permissions migration" duration=1.845995ms 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.658731274Z level=info msg="Executing migration" id="managed folder permissions alert actions migration" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.659818229Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions migration" duration=1.085281ms 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator 
t=2026-03-09T20:12:19.660584873Z level=info msg="Executing migration" id="RBAC action name migrator" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.661560119Z level=info msg="Migration successfully executed" id="RBAC action name migrator" duration=974.724µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.662235773Z level=info msg="Executing migration" id="Add UID column to playlist" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.665415654Z level=info msg="Migration successfully executed" id="Add UID column to playlist" duration=3.178088ms 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.666276666Z level=info msg="Executing migration" id="Update uid column values in playlist" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.666448407Z level=info msg="Migration successfully executed" id="Update uid column values in playlist" duration=175.228µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.667033934Z level=info msg="Executing migration" id="Add index for uid in playlist" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.667630952Z level=info msg="Migration successfully executed" id="Add index for uid in playlist" duration=597.118µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.668362931Z level=info msg="Executing migration" id="update group index for alert rules" 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.668874268Z level=info msg="Migration successfully executed" id="update group index for alert rules" duration=510.957µs 2026-03-09T20:12:19.821 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.66964513Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated migration" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.670525148Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated migration" duration=879.967µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.671235267Z level=info msg="Executing migration" 
id="admin only folder/dashboard permission" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.671663989Z level=info msg="Migration successfully executed" id="admin only folder/dashboard permission" duration=426.678µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.672329886Z level=info msg="Executing migration" id="add action column to seed_assignment" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.67669153Z level=info msg="Migration successfully executed" id="add action column to seed_assignment" duration=4.357525ms 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.677881378Z level=info msg="Executing migration" id="add scope column to seed_assignment" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.682028088Z level=info msg="Migration successfully executed" id="add scope column to seed_assignment" duration=4.15113ms 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.683176829Z level=info msg="Executing migration" id="remove unique index builtin_role_role_name before nullable update" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.684009737Z level=info msg="Migration successfully executed" id="remove unique index builtin_role_role_name before nullable update" duration=833.16µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.684880276Z level=info msg="Executing migration" id="update seed_assignment role_name column to nullable" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.725447466Z level=info msg="Migration successfully executed" id="update seed_assignment role_name column to nullable" duration=40.557621ms 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.726435836Z level=info msg="Executing migration" id="add unique index builtin_role_name back" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.727059424Z level=info msg="Migration successfully executed" id="add unique index builtin_role_name back" duration=623.979µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator 
t=2026-03-09T20:12:19.72769939Z level=info msg="Executing migration" id="add unique index builtin_role_action_scope" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.728242668Z level=info msg="Migration successfully executed" id="add unique index builtin_role_action_scope" duration=543.408µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.729555645Z level=info msg="Executing migration" id="add primary key to seed_assigment" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.742520091Z level=info msg="Migration successfully executed" id="add primary key to seed_assigment" duration=12.961339ms 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.74891001Z level=info msg="Executing migration" id="add origin column to seed_assignment" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.75223275Z level=info msg="Migration successfully executed" id="add origin column to seed_assignment" duration=3.32257ms 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.753111124Z level=info msg="Executing migration" id="add origin to plugin seed_assignment" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.753304807Z level=info msg="Migration successfully executed" id="add origin to plugin seed_assignment" duration=193.944µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.754080598Z level=info msg="Executing migration" id="prevent seeding OnCall access" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.75422568Z level=info msg="Migration successfully executed" id="prevent seeding OnCall access" duration=130.475µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.754858515Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated fixed migration" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.755451244Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated fixed migration" duration=592.75µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator 
t=2026-03-09T20:12:19.75617599Z level=info msg="Executing migration" id="managed folder permissions library panel actions migration" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.756959296Z level=info msg="Migration successfully executed" id="managed folder permissions library panel actions migration" duration=783.096µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.757512071Z level=info msg="Executing migration" id="migrate external alertmanagers to datsourcse" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.757637416Z level=info msg="Migration successfully executed" id="migrate external alertmanagers to datsourcse" duration=125.435µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.758260773Z level=info msg="Executing migration" id="create folder table" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.758735451Z level=info msg="Migration successfully executed" id="create folder table" duration=473.646µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.759332258Z level=info msg="Executing migration" id="Add index for parent_uid" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.759924707Z level=info msg="Migration successfully executed" id="Add index for parent_uid" duration=593.479µs 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.760795647Z level=info msg="Executing migration" id="Add unique index for folder.uid and folder.org_id" 2026-03-09T20:12:19.822 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.761335146Z level=info msg="Migration successfully executed" id="Add unique index for folder.uid and folder.org_id" duration=539.179µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.762041519Z level=info msg="Executing migration" id="Update folder title length" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.762053622Z level=info msg="Migration successfully executed" id="Update folder title length" duration=12.544µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.763588755Z level=info 
msg="Executing migration" id="Add unique index for folder.title and folder.parent_uid" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.764838234Z level=info msg="Migration successfully executed" id="Add unique index for folder.title and folder.parent_uid" duration=1.249128ms 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.766074156Z level=info msg="Executing migration" id="Remove unique index for folder.title and folder.parent_uid" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.76684481Z level=info msg="Migration successfully executed" id="Remove unique index for folder.title and folder.parent_uid" duration=770.592µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.767563204Z level=info msg="Executing migration" id="Add unique index for title, parent_uid, and org_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.768150393Z level=info msg="Migration successfully executed" id="Add unique index for title, parent_uid, and org_id" duration=586.998µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.768825256Z level=info msg="Executing migration" id="Sync dashboard and folder table" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.769113175Z level=info msg="Migration successfully executed" id="Sync dashboard and folder table" duration=288.339µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.769622338Z level=info msg="Executing migration" id="Remove ghost folders from the folder table" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.76978882Z level=info msg="Migration successfully executed" id="Remove ghost folders from the folder table" duration=166.221µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.770331275Z level=info msg="Executing migration" id="Remove unique index UQE_folder_uid_org_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.77084117Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_uid_org_id" duration=510.035µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.77139709Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_uid" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.773439382Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_uid" duration=2.039598ms 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.776316086Z level=info msg="Executing migration" id="Remove unique index UQE_folder_title_parent_uid_org_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.777395597Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_title_parent_uid_org_id" duration=1.081393ms 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.778178873Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_parent_uid_title" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.779002835Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_parent_uid_title" duration=823.742µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.779721691Z level=info msg="Executing migration" id="Remove index IDX_folder_parent_uid_org_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.780274014Z level=info msg="Migration successfully executed" id="Remove index IDX_folder_parent_uid_org_id" duration=552.453µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.780855643Z level=info msg="Executing migration" id="create anon_device table" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.781331493Z level=info msg="Migration successfully executed" id="create anon_device table" duration=475.69µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.782088481Z level=info msg="Executing migration" id="add unique index anon_device.device_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.782704333Z level=info msg="Migration successfully executed" id="add unique index anon_device.device_id" duration=615.571µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.783487288Z level=info msg="Executing migration" id="add index anon_device.updated_at" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.784225481Z level=info msg="Migration successfully executed" id="add index anon_device.updated_at" duration=736.639µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.784923597Z level=info msg="Executing migration" id="create signing_key table" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.785510936Z level=info msg="Migration successfully executed" id="create signing_key table" duration=587.439µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.786588914Z level=info msg="Executing migration" id="add unique index signing_key.key_id" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.787101945Z level=info msg="Migration successfully executed" id="add unique index signing_key.key_id" duration=512.86µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.78787434Z level=info msg="Executing migration" id="set legacy alert migration status in kvstore" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.788503107Z level=info msg="Migration successfully executed" id="set legacy alert migration status in kvstore" duration=628.244µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.789507166Z level=info msg="Executing migration" id="migrate record of created folders during legacy migration to kvstore" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.789911253Z level=info msg="Migration successfully executed" id="migrate record of created folders during legacy migration to kvstore" duration=404.277µs 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.790591295Z level=info msg="Executing migration" id="Add folder_uid for dashboard" 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.795390057Z level=info msg="Migration successfully executed" id="Add folder_uid for dashboard" duration=4.791689ms 2026-03-09T20:12:19.823 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.796653201Z level=info msg="Executing migration" id="Populate dashboard folder_uid column" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.798052221Z level=info msg="Migration successfully executed" id="Populate dashboard folder_uid column" duration=1.399349ms 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.799613202Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.800753676Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title" duration=1.139873ms 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.80167495Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_id_title" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.802467354Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_id_title" duration=792.834µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.803218751Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_uid_title" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.803892291Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_uid_title" duration=673.219µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.804706404Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.805556105Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" duration=849.439µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.806270252Z level=info msg="Executing migration" id="Restore index for dashboard_org_id_folder_id_title" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.80689997Z level=info msg="Migration successfully executed" id="Restore index 
for dashboard_org_id_folder_id_title" duration=640.749µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.807585052Z level=info msg="Executing migration" id="create sso_setting table" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.80821971Z level=info msg="Migration successfully executed" id="create sso_setting table" duration=634.728µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.809073889Z level=info msg="Executing migration" id="copy kvstore migration status to each org" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.809770253Z level=info msg="Migration successfully executed" id="copy kvstore migration status to each org" duration=696.374µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.81038825Z level=info msg="Executing migration" id="add back entry for orgid=0 migrated status" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.810779191Z level=info msg="Migration successfully executed" id="add back entry for orgid=0 migrated status" duration=391.001µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.811410733Z level=info msg="Executing migration" id="alter kv_store.value to longtext" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.811511712Z level=info msg="Migration successfully executed" id="alter kv_store.value to longtext" duration=101.26µs 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.81216798Z level=info msg="Executing migration" id="add notification_settings column to alert_rule table" 2026-03-09T20:12:19.824 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.815250049Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule table" duration=3.079063ms 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 
20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:19.889 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:19 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.825718283Z level=info msg="Executing migration" id="add notification_settings column to alert_rule_version table" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.830322269Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule_version table" duration=4.601572ms 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.831599059Z level=info msg="Executing migration" id="removing scope from alert.instances:read action migration" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.831926232Z level=info msg="Migration successfully executed" id="removing scope from alert.instances:read action migration" duration=327.103µs 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=migrator t=2026-03-09T20:12:19.832664773Z level=info msg="migrations completed" performed=169 skipped=378 duration=513.547535ms 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore t=2026-03-09T20:12:19.833345799Z level=info msg="Created default organization" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=secrets t=2026-03-09T20:12:19.836641056Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 
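The Grafana migrator entries above all follow the same two-part pattern (an "Executing migration" record followed by "Migration successfully executed" with an id and a duration), and the run finishes with performed=169 skipped=378. A minimal, hypothetical Python sketch for pulling the slowest migrations out of a journalctl capture like this one; it assumes only the msg/id/duration fields visible in the entries above and is not part of the teuthology run:

#!/usr/bin/env python3
"""Summarize Grafana migrator entries from a journalctl capture (hypothetical helper)."""
import re
import sys

# Matches: msg="Migration successfully executed" id="..." duration=487.313µs
ENTRY = re.compile(
    r'msg="Migration successfully executed" id="(?P<id>[^"]+)" '
    r'duration=(?P<value>[\d.]+)(?P<unit>µs|ms|s)\b'
)
TO_MS = {"µs": 1e-3, "ms": 1.0, "s": 1e3}  # normalize every duration to milliseconds

def slowest(log_text: str, top: int = 10):
    rows = [
        (m.group("id"), float(m.group("value")) * TO_MS[m.group("unit")])
        for m in ENTRY.finditer(log_text)
    ]
    return sorted(rows, key=lambda r: r[1], reverse=True)[:top]

if __name__ == "__main__":
    for mig_id, ms in slowest(sys.stdin.read()):
        print(f"{ms:10.3f} ms  {mig_id}")

Fed the entries above, this would surface the outliers such as "update seed_assignment role_name column to nullable" (~40 ms) and "add primary key to seed_assigment" (~13 ms) ahead of the sub-millisecond index migrations.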
2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=plugin.store t=2026-03-09T20:12:19.849557251Z level=info msg="Loading plugins..." 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=local.finder t=2026-03-09T20:12:19.894967917Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=plugin.store t=2026-03-09T20:12:19.894996139Z level=info msg="Plugins loaded" count=55 duration=45.458475ms 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=query_data t=2026-03-09T20:12:19.897260217Z level=info msg="Query Service initialization" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=live.push_http t=2026-03-09T20:12:19.900090385Z level=info msg="Live Push Gateway initialization" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.migration t=2026-03-09T20:12:19.901614827Z level=info msg=Starting 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert t=2026-03-09T20:12:19.906611269Z level=warn msg="Unexpected number of rows updating alert configuration history" rows=0 org=1 hash=not-yet-calculated 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.state.manager t=2026-03-09T20:12:19.907426916Z level=info msg="Running in alternative execution of Error/NoData mode" 2026-03-09T20:12:20.092 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=infra.usagestats.collector t=2026-03-09T20:12:19.908539308Z level=info msg="registering usage stat providers" usageStatsProvidersLen=2 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.datasources t=2026-03-09T20:12:19.911527331Z level=info msg="deleted datasource based on configuration" name=Dashboard1 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.datasources t=2026-03-09T20:12:19.911920977Z level=info msg="inserting datasource from configuration" name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.alerting t=2026-03-09T20:12:19.934210406Z level=info msg="starting to provision alerting" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.alerting t=2026-03-09T20:12:19.934229111Z level=info msg="finished to provision alerting" 
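Once the daemon reaches "HTTP Server Listen" on [::]:3000 over HTTPS (next entries), a quick liveness probe against Grafana's /api/health endpoint is one way to confirm the container actually serves requests. The sketch below is hypothetical and not part of the test; the vm06.local:3000 URL is taken from the log, and skipping certificate verification assumes cephadm's default self-signed certificate:

#!/usr/bin/env python3
"""Hypothetical liveness probe for the cephadm-deployed Grafana instance."""
import json
import ssl
import urllib.request

def grafana_healthy(url: str = "https://vm06.local:3000/api/health") -> bool:
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE  # assumption: self-signed certificate from cephadm
    with urllib.request.urlopen(url, context=ctx, timeout=10) as resp:
        body = json.loads(resp.read().decode())
    # Grafana's /api/health reports the database state alongside the version string.
    return resp.status == 200 and body.get("database") == "ok"

if __name__ == "__main__":
    print("grafana healthy:", grafana_healthy())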
2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=http.server t=2026-03-09T20:12:19.936325225Z level=info msg="HTTP Server TLS settings" MinTLSVersion=TLS1.2 configuredciphers=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=http.server t=2026-03-09T20:12:19.936763054Z level=info msg="HTTP Server Listen" address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.state.manager t=2026-03-09T20:12:19.936884251Z level=info msg="Warming state cache for startup" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.state.manager t=2026-03-09T20:12:19.937182109Z level=info msg="State cache has been initialized" states=0 duration=297.416µs 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.dashboard t=2026-03-09T20:12:19.938000129Z level=info msg="starting to provision dashboards" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=grafanaStorageLogger t=2026-03-09T20:12:19.939363361Z level=info msg="Storage starting" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.multiorg.alertmanager t=2026-03-09T20:12:19.940827612Z level=info msg="Starting MultiOrg Alertmanager" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ngalert.scheduler t=2026-03-09T20:12:19.940878377Z level=info msg="Starting scheduler" tickInterval=10s maxAttempts=1 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ticker t=2026-03-09T20:12:19.94095449Z level=info msg=starting first_tick=2026-03-09T20:12:20Z 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore.transactions t=2026-03-09T20:12:19.995762263Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=0 code="database is locked" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=plugins.update.checker t=2026-03-09T20:12:20.027248174Z level=info msg="Update check succeeded" duration=87.290019ms 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: 
logger=sqlstore.transactions t=2026-03-09T20:12:20.029054045Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=0 code="database is locked" 2026-03-09T20:12:20.093 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore.transactions t=2026-03-09T20:12:20.06120984Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=1 code="database is locked" 2026-03-09T20:12:20.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore.transactions t=2026-03-09T20:12:20.096483131Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=2 code="database is locked" 2026-03-09T20:12:20.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=sqlstore.transactions t=2026-03-09T20:12:20.120181016Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=0 code="database is locked" 2026-03-09T20:12:20.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=grafana-apiserver t=2026-03-09T20:12:20.20932125Z level=info msg="Adding GroupVersion playlist.grafana.app v0alpha1 to ResourceManager" 2026-03-09T20:12:20.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=grafana-apiserver t=2026-03-09T20:12:20.209971416Z level=info msg="Adding GroupVersion featuretoggle.grafana.app v0alpha1 to ResourceManager" 2026-03-09T20:12:20.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:12:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=provisioning.dashboard t=2026-03-09T20:12:20.303742808Z level=info msg="finished to provision dashboards" 2026-03-09T20:12:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:21 vm06 ceph-mon[48187]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T20:12:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:21 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:21 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:21 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:21 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[53908]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T20:12:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.878 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[49698]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T20:12:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:21.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:21 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:22] "GET /metrics HTTP/1.1" 200 37552 "" "Prometheus/2.51.0" 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[53908]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:22 vm01 ceph-mon[49698]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:12:23.139 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:22 vm06 ceph-mon[48187]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:23.139 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:22 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:23.139 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:22 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:23.139 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:22 vm06 ceph-mon[48187]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:12:23.613 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE Bus STOPPING 2026-03-09T20:12:23.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:12:23.878 
INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE Bus STOPPED 2026-03-09T20:12:23.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE Bus STARTING 2026-03-09T20:12:23.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE Serving on http://:::9283 2026-03-09T20:12:23.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:23 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:23] ENGINE Bus STARTED 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.516 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 
2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: Upgrade: Complete! 
2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:24 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 
2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 
2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: Upgrade: Complete! 
2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: Upgrade: Complete! 
2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:24.633 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:24 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:25 vm06 ceph-mon[48187]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:25.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:25 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:25 vm01 ceph-mon[53908]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:25 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:25 vm01 ceph-mon[49698]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:25 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:26 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 
20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:27.268 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:26.984Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:27.268 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:26.985Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:27 vm06 ceph-mon[48187]: pgmap v16: 
161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:27.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:27 vm01 ceph-mon[53908]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:27.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:27 vm01 ceph-mon[49698]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:29 vm06 ceph-mon[48187]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:29 vm01 ceph-mon[53908]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:29 vm01 ceph-mon[49698]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:30.715 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"'' 2026-03-09T20:12:31.333 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:12:31.393 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | keys'"'"' | grep $sha1' 2026-03-09T20:12:31.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[49698]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:31.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[49698]: from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:31.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:31.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/4056634126' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:31.584 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[53908]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:31.584 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[53908]: from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:31.584 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:31.584 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:31 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/4056634126' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:31 vm06 ceph-mon[48187]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:31 vm06 ceph-mon[48187]: from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:31 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:31.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:31 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/4056634126' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:32.040 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T20:12:32.099 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 2'"'"'' 2026-03-09T20:12:32.790 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:12:32.838 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:32 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1085482903' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:32.838 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:32] "GET /metrics HTTP/1.1" 200 37549 "" "Prometheus/2.51.0" 2026-03-09T20:12:32.838 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:32 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1085482903' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:32 vm06 ceph-mon[48187]: from='client.? 
192.168.123.101:0/1085482903' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:32.849 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 2'"'"'' 2026-03-09T20:12:33.815 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:33 vm06 ceph-mon[48187]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:33 vm06 ceph-mon[48187]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:33 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1560548534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[49698]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/1560548534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:33.842 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:33.843 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[53908]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:33.843 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:33 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/1560548534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:33.881 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:12:34.429 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:12:34.499 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:12:34.687 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:34 vm01 ceph-mon[49698]: from='client.15219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:34.687 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:34 vm01 ceph-mon[53908]: from='client.15219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:34 vm06 ceph-mon[48187]: from='client.15219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:35.137 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:12:35.220 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '"'"'{print $2}'"'"')' 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[49698]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[49698]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[49698]: from='client.? 
192.168.123.101:0/1349198011' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[53908]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[53908]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:35.691 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:35 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/1349198011' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:12:36.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:35 vm06 ceph-mon[48187]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:36.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:35 vm06 ceph-mon[48187]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:36.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:35 vm06 ceph-mon[48187]: from='client.? 192.168.123.101:0/1349198011' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:12:36.462 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:36.531 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:12:37.047 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:36.986Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:37.047 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:36.988Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:37.199 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='client.15237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm06", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 
192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:12:37.594 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:12:37.595 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 ceph-mon[48187]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (12m) 16s ago 19m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (18s) 12s ago 19m 65.6M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (26s) 16s ago 18m 43.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (30s) 12s ago 20m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (9m) 16s ago 21m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (21m) 16s ago 21m 79.7M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (21m) 12s ago 21m 56.4M 2048M 17.2.0 e1d6a67b021e 9f080c80b1b8 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (21m) 16s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (12m) 16s ago 19m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (12m) 12s ago 19m 10.0M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (20m) 16s ago 20m 55.9M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (20m) 16s ago 20m 54.5M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:12:37.668 
INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (20m) 16s ago 20m 50.9M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (20m) 16s ago 20m 54.5M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (20m) 12s ago 20m 55.7M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (20m) 12s ago 20m 53.8M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (19m) 12s ago 19m 51.0M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (19m) 12s ago 19m 55.0M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (32s) 12s ago 19m 49.8M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (19m) 16s ago 19m 95.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:12:37.668 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (19m) 12s ago 19m 94.9M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:12:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='client.15237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm06", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='client.15237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm06", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 
ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='mgr.y' 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:12:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:37 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:37.914 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:37 vm06 systemd[1]: Stopping Ceph mon.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
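[annotation] The staggered upgrade kicked off above is deliberately scoped to one daemon type on one host: the test resolves the host carrying the standby mgr.x from the orch ps listing and passes it to orch upgrade start. A minimal sketch of that invocation, using the image and pipeline from the command line logged at 20:12:35 (not a separate step of the test):

    # Resolve the host that runs mgr.x (second column of the orch ps row).
    host=$(ceph orch ps | grep mgr.x | awk '{print $2}')
    # Start a staggered upgrade limited to mon daemons on that host only.
    ceph orch upgrade start \
        --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df \
        --daemon-types mon \
        --hosts "$host"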
2026-03-09T20:12:37.933 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:12:37.933 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:12:37.933 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T20:12:37.933 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:12:37.934 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:12:38.170 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) mon on host(s) vm06", 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "0/1 daemons upgraded", 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading mon daemons", 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:12:38.171 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:12:38.215 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b[48183]: 2026-03-09T20:12:37.998+0000 7f84940d1700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:12:38.215 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b[48183]: 2026-03-09T20:12:37.998+0000 7f84940d1700 -1 mon.b@2(peon) e3 *** Got Signal Terminated *** 
2026-03-09T20:12:38.215 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81137]: 2026-03-09 20:12:38.12343694 +0000 UTC m=+0.145636097 container died 9f080c80b1b8045d9dfbcb6c08c7a0ec1c58f8f0ad42c3ebf2d1df55e544c383 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b, vendor=Red Hat, Inc., ceph=True, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, build-date=2022-05-03T08:36:31.336870, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, release=754, io.buildah.version=1.19.8, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=) 2026-03-09T20:12:38.215 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81137]: 2026-03-09 20:12:38.13861552 +0000 UTC m=+0.160814677 container remove 9f080c80b1b8045d9dfbcb6c08c7a0ec1c58f8f0ad42c3ebf2d1df55e544c383 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, release=754, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8, CEPH_POINT_RELEASE=-17.2.0, ceph=True, io.buildah.version=1.19.8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64, name=centos-stream) 2026-03-09T20:12:38.215 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 bash[81137]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b 2026-03-09T20:12:38.219 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE Bus STOPPING 2026-03-09T20:12:38.219 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:12:38.219 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE Bus STOPPED 2026-03-09T20:12:38.219 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE Bus STARTING 2026-03-09T20:12:38.539 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.b.service: Deactivated successfully. 2026-03-09T20:12:38.539 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 systemd[1]: Stopped Ceph mon.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:12:38.539 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.b.service: Consumed 11.805s CPU time. 2026-03-09T20:12:38.539 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 systemd[1]: Starting Ceph mon.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
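[annotation] Once mon.b restarts on the squid image, the mixed-version state can be confirmed with the same jq checks the test ran before starting the upgrade; a small sketch, assuming jq is available inside the cephadm shell as it is in this run:

    # During a staggered upgrade the cluster reports two distinct builds overall.
    ceph versions | jq -e '.overall | length == 2'
    # Count the daemons already up to date against the target image.
    ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df \
        | jq '.up_to_date | length'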
2026-03-09T20:12:38.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE Serving on http://:::9283 2026-03-09T20:12:38.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:38 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:12:38] ENGINE Bus STARTED 2026-03-09T20:12:38.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81245]: 2026-03-09 20:12:38.539067393 +0000 UTC m=+0.017339846 container create 7b13932b4e7d7d936187c21c30627e23061c43c45348f15244a8c555257df092 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81245]: 2026-03-09 20:12:38.576800989 +0000 UTC m=+0.055073451 container init 7b13932b4e7d7d936187c21c30627e23061c43c45348f15244a8c555257df092 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81245]: 2026-03-09 20:12:38.580158733 +0000 UTC m=+0.058431186 container start 7b13932b4e7d7d936187c21c30627e23061c43c45348f15244a8c555257df092 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 bash[81245]: 7b13932b4e7d7d936187c21c30627e23061c43c45348f15244a8c555257df092 2026-03-09T20:12:38.844 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 podman[81245]: 2026-03-09 20:12:38.53225385 +0000 UTC m=+0.010526312 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 systemd[1]: Started Ceph mon.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: pidfile_write: ignore empty --pid-file 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: load: jerasure load: lrc 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: RocksDB version: 7.9.2 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Git sha 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: DB SUMMARY 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: DB Session ID: Z9L9YEQ8F0DXVWO20Q7C 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: CURRENT file: CURRENT 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2068 Bytes 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000042.sst 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000040.log size: 69747 ; 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.error_if_exists: 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.create_if_missing: 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.paranoid_checks: 1 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.env: 0x55b3ddc1cdc0 
2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.info_log: 0x55b3de7e05c0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.statistics: (nil) 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.use_fsync: 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_log_file_size: 0 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T20:12:38.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_fallocate: 1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.use_direct_reads: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.db_log_dir: 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.wal_dir: 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.advise_random_on_open: 
1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.write_buffer_manager: 0x55b3de7e5900 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.unordered_write: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.row_cache: None 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.wal_filter: None 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.two_write_queues: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.wal_compression: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.atomic_flush: 0 2026-03-09T20:12:38.845 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 
vm06 ceph-mon[81259]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.log_readahead_size: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_background_jobs: 2 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_background_compactions: -1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_subcompactions: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_open_files: -1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: 
Options.strict_bytes_per_sync: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_background_flushes: -1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Compression algorithms supported: 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kZSTD supported: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kXpressCompression supported: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kBZip2Compression supported: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kLZ4Compression supported: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kZlibCompression supported: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: kSnappyCompression supported: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.merge_operator: 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_filter: None 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55b3de7e05a0) 
2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout: cache_index_and_filter_blocks: 1 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T20:12:38.846 INFO:journalctl@ceph.mon.b.vm06.stdout: pin_top_level_index_and_filter: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: index_type: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: data_block_index_type: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: index_shortening: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: checksum: 4 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: no_block_cache: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_cache: 0x55b3de805350 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_cache_name: BinnedLRUCache 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_cache_options: 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: capacity : 536870912 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: num_shard_bits : 4 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: strict_capacity_limit : 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: high_pri_pool_ratio: 0.000 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_cache_compressed: (nil) 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: persistent_cache: (nil) 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_size: 4096 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_size_deviation: 10 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_restart_interval: 16 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: index_block_restart_interval: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: metadata_block_size: 4096 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: partition_filters: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: use_delta_encoding: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: filter_policy: bloomfilter 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: whole_key_filtering: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: verify_compression: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: read_amp_bytes_per_bit: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: format_version: 5 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: enable_index_compression: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: block_align: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: max_auto_readahead_size: 262144 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: prepopulate_block_cache: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: initial_auto_readahead_size: 8192 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T20:12:38.847 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression: NoCompression 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.num_levels: 7 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T20:12:38.847 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: 
Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T20:12:38.848 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.inplace_update_support: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.bloom_locality: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: 
Options.max_successive_merges: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T20:12:38.848 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.ttl: 2592000 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enable_blob_files: false 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.min_blob_size: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 44, last_sequence is 23694, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4baba162-aecc-4807-93be-a2507704717b
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087158607980, "job": 1, "event": "recovery_started", "wal_files": [40]}
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087158609344, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 45, "file_size": 46231, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 23699, "largest_seqno": 23774, "table_properties": {"data_size": 44932, "index_size": 138, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 133, "raw_key_size": 1363, "raw_average_key_size": 27, "raw_value_size": 43859, "raw_average_value_size": 895, "num_data_blocks": 6, "num_entries": 49, "num_filter_entries": 49, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773087158, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4baba162-aecc-4807-93be-a2507704717b", "db_session_id": "Z9L9YEQ8F0DXVWO20Q7C", "orig_file_number": 45, "seqno_to_time_mapping": "N/A"}}
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087158609405, "job": 1, "event": "recovery_finished"}
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:5047] Creating manifest 47
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000040.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55b3de806e00
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: DB pointer 0x55b3de920000
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: ** DB Stats **
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: ** Compaction Stats [default] **
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: L0 1/0 45.15 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 36.6 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: L6 1/0 9.55 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Sum 2/0 9.59 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 36.6 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 36.6 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.849 INFO:journalctl@ceph.mon.b.vm06.stdout: ** Compaction Stats [default] **
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 36.6 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Cumulative compaction: 0.00 GB write, 4.68 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Interval compaction: 0.00 GB write, 4.68 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Block cache BinnedLRUCache@0x55b3de805350#2 capacity: 512.00 MB usage: 0.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 8e-06 secs_since: 0
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.17 KB,3.27826e-05%) IndexBlock(1,0.22 KB,4.17233e-05%) Misc(1,0.00 KB,0%)
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: starting mon.b rank 2 at public addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] at bind addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???) e3 preinit fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).mds e1 new map
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).mds e1 print_map
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: e1
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: btime 1970-01-01T00:00:00:000000+0000
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: legacy client fscid: -1
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout: No filesystems configured
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).osd e95 crush map has features 3314933000854323200, adjusting msgr requires
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires
2026-03-09T20:12:38.850 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:38 vm06 ceph-mon[81259]: mon.b@-1(???).paxosservice(auth 1..22) refresh upgraded, format 0 -> 3
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: from='client.25192 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: mon.b calling monitor election
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: mon.a calling monitor election
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: monmap e3: 3 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]}
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: fsmap
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: osdmap e95: 8 total, 8 up, 8 in
2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09
20:12:39 vm06 ceph-mon[81259]: mgrmap e39: y(active, since 39s), standbys: x 2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: overall HEALTH_OK 2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:39.957 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:39 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: from='client.25192 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: mon.b calling monitor election 2026-03-09T20:12:40.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: mon.a calling monitor election 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: monmap e3: 3 mons at {a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: fsmap 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: mgrmap e39: y(active, since 39s), standbys: x 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: overall HEALTH_OK 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: from='client.25192 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: mon.b calling monitor election 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: mon.a calling monitor election 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: monmap e3: 3 mons at 
{a=[v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0],b=[v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0],c=[v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0]} 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: fsmap 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: mgrmap e39: y(active, since 39s), standbys: x 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: overall HEALTH_OK 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:40.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:39 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:41 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[49698]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[53908]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:41.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:41 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.568 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:41 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.568 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:41 vm06 ceph-mon[81259]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:41.568 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:41 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:41.568 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:41 vm06 ceph-mon[81259]: from='mgr.24845 
192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:42] "GET /metrics HTTP/1.1" 200 37549 "" "Prometheus/2.51.0" 2026-03-09T20:12:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: Detected new or changed devices on vm06 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 
09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key 
del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: Detected new or changed devices on vm06 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:42.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:42 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: Detected new or changed devices on vm06 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 
2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:12:43.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:12:43.094 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:12:43.094 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:12:43.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:42 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:43 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:12:43.618+0000 7f7a5a5fc640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all grafana 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all loki 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Setting container_image for all promtail 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: Upgrade: Complete! 
2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[49698]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all grafana 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all loki 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Setting container_image for all promtail 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: Upgrade: Complete! 
2026-03-09T20:12:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:43 vm01 ceph-mon[53908]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mds 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nfs 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:12:44.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all grafana 2026-03-09T20:12:44.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all loki 2026-03-09T20:12:44.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all promtail 2026-03-09T20:12:44.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Finalizing container_image settings 2026-03-09T20:12:44.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: Upgrade: Complete! 
2026-03-09T20:12:44.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:43 vm06 ceph-mon[81259]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:46.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:45 vm06 ceph-mon[81259]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:46.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:45 vm01 ceph-mon[49698]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:46.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:45 vm01 ceph-mon[53908]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:46.987 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:46 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:46.987 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:46 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:46.987 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:46 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:46.987 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:46 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:47.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:46 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:12:47.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:46 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:12:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:46.987Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:46.988Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:48.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:47 vm06 ceph-mon[81259]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:48.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:47 vm01 ceph-mon[49698]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:48.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:47 vm01 ceph-mon[53908]: pgmap v26: 161 pgs: 
161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:50.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:49 vm06 ceph-mon[81259]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:50.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:49 vm01 ceph-mon[49698]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:50.096 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:49 vm01 ceph-mon[53908]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:50 vm06 ceph-mon[81259]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:51.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:50 vm01 ceph-mon[49698]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:51.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:50 vm01 ceph-mon[53908]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:12:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:12:52] "GET /metrics HTTP/1.1" 200 37561 "" "Prometheus/2.51.0" 2026-03-09T20:12:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:53 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:53 vm06 ceph-mon[81259]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:53.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:53 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:53.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:53 vm01 ceph-mon[49698]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:53 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:12:53.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:53 vm01 ceph-mon[53908]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:55 vm06 ceph-mon[81259]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:55.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:55 vm01 ceph-mon[49698]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:55.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:55 vm01 ceph-mon[53908]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T20:12:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:56.987Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:12:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:12:56.988Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:12:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:57 vm06 ceph-mon[81259]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:57.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:57 vm01 ceph-mon[49698]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:57.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:57 vm01 ceph-mon[53908]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:12:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:12:59 vm06 ceph-mon[81259]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:59.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:12:59 vm01 ceph-mon[49698]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:12:59.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:12:59 vm01 ceph-mon[53908]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:01 vm06 ceph-mon[81259]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:01 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:01 vm01 ceph-mon[49698]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:01 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:02.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:01 vm01 ceph-mon[53908]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:02.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:01 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", 
"format": "json"}]: dispatch 2026-03-09T20:13:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:02] "GET /metrics HTTP/1.1" 200 37559 "" "Prometheus/2.51.0" 2026-03-09T20:13:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:03 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:04.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:03 vm06 ceph-mon[81259]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:03 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:04.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:03 vm01 ceph-mon[49698]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:03 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:04.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:03 vm01 ceph-mon[53908]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:06.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:05 vm06 ceph-mon[81259]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:06.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:05 vm01 ceph-mon[49698]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:06.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:05 vm01 ceph-mon[53908]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:07.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:06 vm06 ceph-mon[81259]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:07.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:06 vm01 ceph-mon[49698]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:07.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:06.987Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:07.128 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:06.988Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:07.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:06 vm01 ceph-mon[53908]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:08.478 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (13m) 47s ago 19m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (49s) 28s ago 19m 76.5M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (57s) 47s ago 19m 43.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (62s) 28s ago 21m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (9m) 47s ago 22m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (22m) 47s ago 22m 79.7M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (30s) 28s ago 21m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (21m) 47s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (13m) 47s ago 20m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (13m) 28s ago 19m 10.0M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (21m) 47s ago 21m 55.9M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (21m) 47s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (20m) 47s ago 20m 50.9M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (20m) 47s ago 20m 54.5M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (20m) 28s ago 20m 55.8M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (20m) 28s ago 20m 53.8M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (20m) 28s ago 20m 51.0M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (20m) 28s 
ago 20m 55.1M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (63s) 28s ago 19m 50.7M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (19m) 47s ago 19m 95.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:13:09.095 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (19m) 28s ago 19m 95.0M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:13:09.153 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 2'"'"'' 2026-03-09T20:13:09.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:09 vm01 ceph-mon[49698]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:09.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:09 vm01 ceph-mon[49698]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:09.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:09 vm01 ceph-mon[53908]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:09.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:09 vm01 ceph-mon[53908]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:09.733 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:13:09.780 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:13:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:09 vm06 ceph-mon[81259]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:09 vm06 ceph-mon[81259]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:13:10.321 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:10.490 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 
0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:13:10.775 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:10 vm01 ceph-mon[49698]: from='client.25204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:10.775 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:10 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/2018780700' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:10.776 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:10 vm01 ceph-mon[53908]: from='client.25204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:10.776 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:10 vm01 ceph-mon[53908]: from='client.? 192.168.123.101:0/2018780700' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:10 vm06 ceph-mon[81259]: from='client.25204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:10 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/2018780700' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:11.184 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:13:11.340 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '"'"'{print $2}'"'"')' 2026-03-09T20:13:11.775 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[49698]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:11.775 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[49698]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:11.775 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[49698]: from='client.? 192.168.123.101:0/629810297' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:11.776 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[53908]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:11.776 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[53908]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:11.776 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:11 vm01 ceph-mon[53908]: from='client.? 
192.168.123.101:0/629810297' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:11 vm06 ceph-mon[81259]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:11 vm06 ceph-mon[81259]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:11 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/629810297' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:12.481 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:12.553 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:13:12.874 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:12] "GET /metrics HTTP/1.1" 200 37559 "" "Prometheus/2.51.0" 2026-03-09T20:13:13.169 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='client.15291 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm01", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[49698]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='client.15291 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm01", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: 
from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.480 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:13.481 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:13 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T20:13:13.680 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (13m) 52s ago 19m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (54s) 33s ago 19m 76.5M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (62s) 52s ago 19m 43.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (66s) 33s ago 21m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (9m) 52s ago 22m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (22m) 52s ago 22m 79.7M 2048M 17.2.0 e1d6a67b021e dac2494fb686 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (35s) 33s ago 21m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (21m) 52s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e 655d47ef7c00 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (13m) 52s ago 20m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (13m) 33s ago 20m 10.0M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (21m) 52s ago 21m 55.9M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (21m) 52s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (21m) 52s ago 21m 50.9M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (20m) 52s ago 20m 54.5M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (20m) 33s ago 20m 55.8M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (20m) 33s ago 20m 53.8M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 
2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (20m) 33s ago 20m 51.0M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (20m) 33s ago 20m 55.1M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (68s) 33s ago 19m 50.7M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (19m) 52s ago 19m 95.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:13:13.681 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (19m) 33s ago 19m 95.0M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='client.15291 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm01", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' 2026-03-09T20:13:13.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 
09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:13.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:13 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T20:13:13.983 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:13.983 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "mds": {}, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12, 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:13:13.984 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) mon on host(s) vm01", 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "0/2 daemons upgraded", 2026-03-09T20:13:14.275 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading mon daemons", 2026-03-09T20:13:14.276 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:13:14.276 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:14.353 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: Stopping Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
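The orch upgrade start dispatched above limits this pass of the staggered upgrade to mon daemons on host vm01; the mgr daemons already run the target build in the ceph orch ps listing, while the mons, OSDs and RGWs are still on 17.2.0. A minimal sketch of the equivalent CLI sequence, run from a shell with the admin keyring (image tag and flags copied from the commands visible in the log):

  # start a staggered upgrade limited to mon daemons on one host
  ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df --daemon-types mon --hosts vm01
  # poll overall progress and the per-daemon version breakdown
  ceph orch upgrade status
  ceph versions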
2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[49694]: 2026-03-09T20:13:14.351+0000 7f399527b700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[49694]: 2026-03-09T20:13:14.351+0000 7f399527b700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[96933]: 2026-03-09 20:13:14.493665979 +0000 UTC m=+0.157954839 container died dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, name=centos-stream, ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux , description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, RELEASE=HEAD, io.buildah.version=1.19.8, io.openshift.expose-services=, vendor=Red Hat, Inc., version=8, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 
2026-03-09T20:13:14.613 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE Bus STOPPING 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[96933]: 2026-03-09 20:13:14.511060868 +0000 UTC m=+0.175349728 container remove dac2494fb686fd88eb678fd8df1466c97fa90ae62385b3860f2dc6ff89915b8f (image=quay.io/ceph/ceph:v17.2.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, name=centos-stream, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, CEPH_POINT_RELEASE=-17.2.0, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., io.buildah.version=1.19.8, io.openshift.expose-services=, distribution-scope=public, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, vcs-type=git, com.redhat.component=centos-stream-container, version=8, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream) 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 bash[96933]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service: Deactivated successfully. 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: Stopped Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:14.613 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service: Consumed 16.102s CPU time. 
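The stop sequence above shows how cephadm redeploys a daemon: systemd stops the per-daemon unit, podman removes the old v17.2.0 container, and the unit start that follows below pulls the target image; both the unit and the container embed the cluster fsid and daemon name. A minimal sketch for inspecting that state on the host, assuming a root shell on vm01 with cephadm installed (fsid copied from the log):

  # list cephadm-managed daemons on this host with image and status
  cephadm ls
  # the systemd unit wrapping mon.a for this cluster fsid
  systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service
  # the backing container, named ceph-<fsid>-mon-a
  podman ps --all --filter name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a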
2026-03-09T20:13:14.875 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE Bus STOPPED 2026-03-09T20:13:14.875 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE Bus STARTING 2026-03-09T20:13:14.875 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE Serving on http://:::9283 2026-03-09T20:13:14.875 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:14 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:14] ENGINE Bus STARTED 2026-03-09T20:13:14.876 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: Starting Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:13:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[97052]: 2026-03-09 20:13:14.875862868 +0000 UTC m=+0.017343384 container create 2eb02d7b7f8ab458f3c069d59fdc605debafd208f11bbcfa1b3eee84d7bb029a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:13:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[97052]: 2026-03-09 20:13:14.912486948 +0000 UTC m=+0.053967464 container init 2eb02d7b7f8ab458f3c069d59fdc605debafd208f11bbcfa1b3eee84d7bb029a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223) 2026-03-09T20:13:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[97052]: 2026-03-09 20:13:14.915136337 +0000 UTC m=+0.056616853 container start 2eb02d7b7f8ab458f3c069d59fdc605debafd208f11bbcfa1b3eee84d7bb029a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS 
Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:13:15.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 bash[97052]: 2eb02d7b7f8ab458f3c069d59fdc605debafd208f11bbcfa1b3eee84d7bb029a 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 podman[97052]: 2026-03-09 20:13:14.868082015 +0000 UTC m=+0.009562540 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 systemd[1]: Started Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: pidfile_write: ignore empty --pid-file 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: load: jerasure load: lrc 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: RocksDB version: 7.9.2 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Git sha 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: DB SUMMARY 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: DB Session ID: BIOIEQ1FCPKCWKCRG8P7 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: CURRENT file: CURRENT 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: MANIFEST file: MANIFEST-000015 size: 2144 Bytes 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000048.sst 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000046.log size: 865593 ; 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.error_if_exists: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.create_if_missing: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.paranoid_checks: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 
ceph-mon[97066]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.env: 0x55ad621a7dc0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.info_log: 0x55ad644237e0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.statistics: (nil) 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.use_fsync: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_log_file_size: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_fallocate: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.use_direct_reads: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.db_log_dir: 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.wal_dir: 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 
ceph-mon[97066]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.write_buffer_manager: 0x55ad64427900 2026-03-09T20:13:15.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.unordered_write: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.row_cache: None 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.wal_filter: None 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.two_write_queues: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: 
Options.manual_wal_flush: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.wal_compression: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.atomic_flush: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.log_readahead_size: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_background_jobs: 2 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_background_compactions: -1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_subcompactions: 1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.stats_history_buffer_size: 
1048576 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_open_files: -1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_background_flushes: -1 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Compression algorithms supported: 2026-03-09T20:13:15.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kZSTD supported: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kXpressCompression supported: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kBZip2Compression supported: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kLZ4Compression supported: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kZlibCompression supported: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: kSnappyCompression supported: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.merge_operator: 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_filter: None 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.sst_partitioner_factory: 
None 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55ad64422320) 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: pin_top_level_index_and_filter: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: index_type: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_index_type: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: index_shortening: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: checksum: 4 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: no_block_cache: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache: 0x55ad64447350 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_name: BinnedLRUCache 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_options: 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: capacity : 536870912 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: num_shard_bits : 4 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: strict_capacity_limit : 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: high_pri_pool_ratio: 0.000 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_compressed: (nil) 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: persistent_cache: (nil) 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size: 4096 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size_deviation: 10 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_restart_interval: 16 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: index_block_restart_interval: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: metadata_block_size: 4096 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: partition_filters: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: use_delta_encoding: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: filter_policy: bloomfilter 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: whole_key_filtering: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: verify_compression: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: read_amp_bytes_per_bit: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: format_version: 5 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_index_compression: 1 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: block_align: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: 
max_auto_readahead_size: 262144 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: prepopulate_block_cache: 0 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: initial_auto_readahead_size: 8192 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression: NoCompression 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T20:13:15.131 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.num_levels: 7 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 
ceph-mon[97066]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T20:13:15.132 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.inplace_update_support: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 
vm01 ceph-mon[97066]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T20:13:15.132 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.bloom_locality: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.max_successive_merges: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.ttl: 2592000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enable_blob_files: false 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.min_blob_size: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 48.sst 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:4390] More existing 
levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 50, last_sequence is 21995, log_number is 46,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 46 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 37bc990c-b1c3-41f6-9710-5d7af46b0042 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087194940245, "job": 1, "event": "recovery_started", "wal_files": [46]} 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #46 mode 2 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087194944896, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 51, "file_size": 792140, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 21996, "largest_seqno": 22865, "table_properties": {"data_size": 788138, "index_size": 1879, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1093, "raw_key_size": 10887, "raw_average_key_size": 26, "raw_value_size": 779561, "raw_average_value_size": 1896, "num_data_blocks": 85, "num_entries": 411, "num_filter_entries": 411, "num_deletions": 8, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773087194, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "37bc990c-b1c3-41f6-9710-5d7af46b0042", "db_session_id": "BIOIEQ1FCPKCWKCRG8P7", "orig_file_number": 51, "seqno_to_time_mapping": "N/A"}} 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087194945009, "job": 1, "event": "recovery_finished"} 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:5047] Creating manifest 53 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
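At this point mon.a is back up on the squid image and has finished recovering its RocksDB store, so it should rejoin the quorum on the new version. A minimal sketch of the follow-up checks, assuming the admin keyring is available:

  # confirm mon.a is back in the quorum
  ceph quorum_status --format json-pretty
  # the per-daemon breakdown should now count one more squid mon
  ceph versions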
2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000046.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55ad64448e00 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: DB pointer 0x55ad64552000 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: ** DB Stats ** 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: L0 1/0 773.57 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 208.5 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: L6 1/0 9.55 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Sum 2/0 10.31 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 208.5 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 208.5 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 208.5 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.133 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative compaction: 0.00 GB write, 66.54 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval compaction: 0.00 GB write, 66.54 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Block cache BinnedLRUCache@0x55ad64447350#2 capacity: 512.00 MB usage: 3.23 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.1e-05 secs_since: 0 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,1.16 KB,0.000220537%) IndexBlock(1,2.08 KB,0.000396371%) Misc(1,0.00 KB,0%) 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: starting mon.a rank 0 at public addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] at bind addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???) 
e3 preinit fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).mds e1 new map 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).mds e1 print_map 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: e1 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: legacy client fscid: -1 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout: No filesystems configured 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).osd e95 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:15.134 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:14 vm01 ceph-mon[97066]: mon.a@-1(???).paxosservice(auth 1..24) refresh upgraded, format 0 -> 3 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: Deploying daemon mon.a on vm01 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='client.34124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: mon.a calling monitor election 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='mgr.24845 
192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: monmap epoch 3 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: last_changed 2026-03-09T19:51:38.174339+0000 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: min_mon_release 17 (quincy) 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: election_strategy: 1 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: fsmap 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: mgrmap e39: y(active, since 76s), standbys: x 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: overall HEALTH_OK 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: from='mgr.24845 ' entity='' 2026-03-09T20:13:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:16 vm06 ceph-mon[81259]: mgrmap e40: y(active, since 76s), standbys: x 2026-03-09T20:13:16.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: ignoring --setuser ceph since I am not root 2026-03-09T20:13:16.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: ignoring --setgroup ceph since I am not root 2026-03-09T20:13:16.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:16.538+0000 7fafe772c140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T20:13:16.843 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:16 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:16.582+0000 7fafe772c140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: Deploying daemon mon.a on vm01 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='client.34124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 
2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: mon.a calling monitor election 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: monmap epoch 3 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: last_changed 2026-03-09T19:51:38.174339+0000 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: min_mon_release 17 (quincy) 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: election_strategy: 1 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: fsmap 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: mgrmap e39: y(active, since 76s), standbys: x 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: overall HEALTH_OK 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: from='mgr.24845 ' entity='' 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[97066]: mgrmap e40: y(active, since 76s), standbys: x 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:16 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ignoring --setuser ceph since I am not root 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ignoring --setgroup ceph since I am not root 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mgr[83566]: -- 192.168.123.101:0/3707728513 <== mon.2 v2:192.168.123.106:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x5556f5cd14a0 con 0x5556f5caf400 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:16.553+0000 7f1379c79140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:16.600+0000 7f1379c79140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: Deploying daemon mon.a on vm01 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='client.34124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: mon.a calling monitor election 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='mgr.24845 192.168.123.101:0/2066658602' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: monmap epoch 3 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: last_changed 2026-03-09T19:51:38.174339+0000 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: min_mon_release 17 (quincy) 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: 
election_strategy: 1 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: fsmap 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: mgrmap e39: y(active, since 76s), standbys: x 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: overall HEALTH_OK 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: from='mgr.24845 ' entity='' 2026-03-09T20:13:16.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:16 vm01 ceph-mon[53908]: mgrmap e40: y(active, since 76s), standbys: x 2026-03-09T20:13:17.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:17.015+0000 7fafe772c140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T20:13:17.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:17.058+0000 7f1379c79140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T20:13:17.379 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:16.989Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:17.379 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:16.989Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:17.638 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:17.380+0000 7fafe772c140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 
2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: from numpy import show_config as show_numpy_config 2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:17.500+0000 7fafe772c140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:17.544+0000 7fafe772c140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T20:13:17.639 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:17.637+0000 7fafe772c140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T20:13:17.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:17.433+0000 7f1379c79140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T20:13:17.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T20:13:17.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T20:13:17.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: from numpy import show_config as show_numpy_config 2026-03-09T20:13:17.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:17.533+0000 7f1379c79140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T20:13:17.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:17.571+0000 7f1379c79140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T20:13:17.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:17.649+0000 7f1379c79140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T20:13:18.494 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.223+0000 7f1379c79140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T20:13:18.495 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.355+0000 7f1379c79140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:13:18.495 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.408+0000 7f1379c79140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T20:13:18.495 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.444+0000 7f1379c79140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T20:13:18.495 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:18 vm01 ceph-mon[97066]: mgrmap e41: y(active, since 77s), standbys: x 2026-03-09T20:13:18.495 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:18 vm01 ceph-mon[53908]: mgrmap e41: y(active, since 77s), standbys: x 2026-03-09T20:13:18.530 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:18 vm06 ceph-mon[81259]: mgrmap e41: y(active, since 77s), standbys: x 2026-03-09T20:13:18.530 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.273+0000 7fafe772c140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T20:13:18.531 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.393+0000 7fafe772c140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:13:18.531 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.441+0000 7fafe772c140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T20:13:18.531 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.481+0000 7fafe772c140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T20:13:18.800 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.494+0000 7f1379c79140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 
2026-03-09T20:13:18.800 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.536+0000 7f1379c79140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:13:18.800 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.739+0000 7f1379c79140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:13:18.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.529+0000 7fafe772c140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T20:13:18.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.573+0000 7fafe772c140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T20:13:18.842 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.774+0000 7fafe772c140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T20:13:19.106 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:18 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:18.848+0000 7fafe772c140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:13:19.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:18.799+0000 7f1379c79140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T20:13:19.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.048+0000 7f1379c79140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:13:19.419 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.376+0000 7f1379c79140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:13:19.439 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.105+0000 7fafe772c140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T20:13:19.683 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.418+0000 7f1379c79140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:13:19.683 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.466+0000 7f1379c79140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:13:19.683 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.554+0000 7f1379c79140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:13:19.683 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.595+0000 7f1379c79140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:13:19.743 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.437+0000 
7fafe772c140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T20:13:19.743 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.480+0000 7fafe772c140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T20:13:19.743 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.526+0000 7fafe772c140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T20:13:19.743 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.612+0000 7fafe772c140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T20:13:19.743 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.653+0000 7fafe772c140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T20:13:19.957 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.682+0000 7f1379c79140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:13:19.957 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.805+0000 7f1379c79140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:13:20.055 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.742+0000 7fafe772c140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T20:13:20.055 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:19 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:19.874+0000 7fafe772c140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:19 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:19.956+0000 7f1379c79140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:20.011+0000 7f1379c79140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:20] ENGINE Bus STARTING 2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: CherryPy Checker: 2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: The Application mounted at '' has an empty config. 
2026-03-09T20:13:20.260 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:20.053+0000 7fafe772c140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:20.097+0000 7fafe772c140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:13:20] ENGINE Bus STARTING 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: CherryPy Checker: 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: The Application mounted at '' has an empty config. 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:13:20] ENGINE Serving on http://:::9283 2026-03-09T20:13:20.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:13:20 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x[78297]: [09/Mar/2026:20:13:20] ENGINE Bus STARTED 2026-03-09T20:13:20.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:20] ENGINE Serving on http://:::9283 2026-03-09T20:13:20.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:13:20] ENGINE Bus STARTED 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: Active manager daemon y restarted 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: Activating manager daemon y 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: mgrmap e42: y(active, starting, since 0.031492s), standbys: x 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: Manager daemon y is now available 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: Standby manager daemon x restarted 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: Standby manager daemon x started 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.? 
192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:21.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: Active manager daemon y restarted 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: Activating manager daemon y 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: mgrmap e42: y(active, starting, since 0.031492s), standbys: x 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:13:21.104 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: Manager daemon y is now available 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: Standby manager daemon x restarted 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: Standby manager daemon x started 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.? 
192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:21.105 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: Active manager daemon y restarted 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: Activating manager daemon y 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: mgrmap e42: y(active, starting, since 0.031492s), standbys: x 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: Manager daemon y is now available 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: Standby manager daemon x restarted 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: Standby manager daemon x started 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.? 192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.? 
192.168.123.106:0/689052252' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:21.106 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:20 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T20:13:21.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:21 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:21.099+0000 7f1345fe3640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: mgrmap e43: y(active, since 1.07684s), standbys: x 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: [09/Mar/2026:20:13:21] ENGINE Bus STARTING 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: [09/Mar/2026:20:13:21] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: [09/Mar/2026:20:13:21] ENGINE Client ('192.168.123.101', 35382) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.049 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:22 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: mgrmap e43: y(active, since 1.07684s), standbys: x 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: [09/Mar/2026:20:13:21] ENGINE Bus STARTING 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: [09/Mar/2026:20:13:21] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: [09/Mar/2026:20:13:21] ENGINE Client ('192.168.123.101', 35382) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: mgrmap e43: y(active, since 1.07684s), standbys: x 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: [09/Mar/2026:20:13:21] ENGINE Bus STARTING 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: [09/Mar/2026:20:13:21] ENGINE Serving on https://192.168.123.101:7150 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: [09/Mar/2026:20:13:21] ENGINE Client ('192.168.123.101', 35382) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.051 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:22 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:22] "GET /metrics HTTP/1.1" 200 34775 "" "Prometheus/2.51.0" 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: [09/Mar/2026:20:13:21] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: [09/Mar/2026:20:13:21] ENGINE Bus STARTED 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:23 vm06 ceph-mon[81259]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: [09/Mar/2026:20:13:21] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: [09/Mar/2026:20:13:21] ENGINE Bus STARTED 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[97066]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: [09/Mar/2026:20:13:21] ENGINE Serving on http://192.168.123.101:8765 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: [09/Mar/2026:20:13:21] ENGINE Bus STARTED 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:23.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:23 vm01 ceph-mon[53908]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:24.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:24 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T20:13:24.623 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.623 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' 2026-03-09T20:13:24.623 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: Updating vm01:/etc/ceph/ceph.conf 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T20:13:24.624 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-mon[53908]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T20:13:25.263 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 systemd[1]: Stopping Ceph mon.c for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:13:25.263 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c[53904]: 2026-03-09T20:13:24.971+0000 7fa305f79700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:13:25.263 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:24 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c[53904]: 2026-03-09T20:13:24.971+0000 7fa305f79700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-09T20:13:25.263 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98752]: 2026-03-09 20:13:25.262574421 +0000 UTC m=+0.314562806 container died 655d47ef7c0008733362358525cf96e3748e7b469cc5b81fa7ae4bb5849b7706 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c, io.openshift.tags=base centos centos-stream, release=754, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.component=centos-stream-container, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , architecture=x86_64) 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98752]: 2026-03-09 20:13:25.311940592 +0000 UTC m=+0.363928977 container remove 655d47ef7c0008733362358525cf96e3748e7b469cc5b81fa7ae4bb5849b7706 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, RELEASE=HEAD, GIT_CLEAN=True, ceph=True, io.openshift.expose-services=, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, CEPH_POINT_RELEASE=-17.2.0, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, version=8, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , name=centos-stream) 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 bash[98752]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.c.service: Deactivated successfully. 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 systemd[1]: Stopped Ceph mon.c for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.c.service: Consumed 10.261s CPU time. 2026-03-09T20:13:25.563 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 systemd[1]: Starting Ceph mon.c for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98862]: 2026-03-09 20:13:25.658200957 +0000 UTC m=+0.014966684 container create c1e1918b4ae111091867d3378664ad6e6bec127c28dfdc593505005a8a1df1a4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98862]: 2026-03-09 20:13:25.69295173 +0000 UTC m=+0.049717468 container init c1e1918b4ae111091867d3378664ad6e6bec127c28dfdc593505005a8a1df1a4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98862]: 2026-03-09 20:13:25.69561774 +0000 UTC m=+0.052383478 container start c1e1918b4ae111091867d3378664ad6e6bec127c28dfdc593505005a8a1df1a4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-c, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 bash[98862]: c1e1918b4ae111091867d3378664ad6e6bec127c28dfdc593505005a8a1df1a4 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 podman[98862]: 2026-03-09 20:13:25.652076543 +0000 UTC m=+0.008842292 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 systemd[1]: Started 
Ceph mon.c for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: pidfile_write: ignore empty --pid-file 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: load: jerasure load: lrc 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: RocksDB version: 7.9.2 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Git sha 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: DB SUMMARY 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: DB Session ID: 3WMCG8NE5OKEWG14RA63 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: CURRENT file: CURRENT 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2258 Bytes 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000045.sst 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000043.log size: 2093978 ; 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.error_if_exists: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.create_if_missing: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.paranoid_checks: 1 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.env: 0x55b120f0fdc0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.info_log: 0x55b1224045c0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.statistics: (nil) 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.use_fsync: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_log_file_size: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_fallocate: 1 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.use_direct_reads: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T20:13:25.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.db_log_dir: 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.wal_dir: 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.write_buffer_manager: 0x55b122409900 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 
ceph-mon[98877]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.unordered_write: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.row_cache: None 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.wal_filter: None 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.two_write_queues: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.wal_compression: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.atomic_flush: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T20:13:25.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.log_readahead_size: 0 2026-03-09T20:13:25.880 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_background_jobs: 2 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_background_compactions: -1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_subcompactions: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_open_files: -1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_background_flushes: -1 2026-03-09T20:13:25.881 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Compression algorithms supported: 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kZSTD supported: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kXpressCompression supported: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kBZip2Compression supported: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kLZ4Compression supported: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kZlibCompression supported: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: kSnappyCompression supported: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.merge_operator: 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_filter: None 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55b1224045a0) 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: cache_index_and_filter_blocks: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T20:13:25.881 
INFO:journalctl@ceph.mon.c.vm01.stdout: pin_top_level_index_and_filter: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: index_type: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: data_block_index_type: 0 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: index_shortening: 1 2026-03-09T20:13:25.881 INFO:journalctl@ceph.mon.c.vm01.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: checksum: 4 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: no_block_cache: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_cache: 0x55b122429350 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_cache_name: BinnedLRUCache 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_cache_options: 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: capacity : 536870912 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: num_shard_bits : 4 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: strict_capacity_limit : 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: high_pri_pool_ratio: 0.000 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_cache_compressed: (nil) 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: persistent_cache: (nil) 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_size: 4096 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_size_deviation: 10 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_restart_interval: 16 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: index_block_restart_interval: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: metadata_block_size: 4096 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: partition_filters: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: use_delta_encoding: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: filter_policy: bloomfilter 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: whole_key_filtering: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: verify_compression: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: read_amp_bytes_per_bit: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: format_version: 5 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: enable_index_compression: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: block_align: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: max_auto_readahead_size: 262144 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: prepopulate_block_cache: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: initial_auto_readahead_size: 8192 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression: NoCompression 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.num_levels: 7 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T20:13:25.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T20:13:25.883 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.inplace_update_support: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.bloom_locality: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.max_successive_merges: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 
ceph-mon[98877]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.ttl: 2592000 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enable_blob_files: false 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.min_blob_size: 0 2026-03-09T20:13:25.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 45.sst 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 47, last_sequence is 25090, log_number is 43,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 43 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 07a37024-7fa0-4f99-ba14-c3f3e461d4f2 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087205719773, "job": 1, "event": "recovery_started", "wal_files": [43]} 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #43 mode 2 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087205729884, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 48, "file_size": 1312531, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 25095, "largest_seqno": 25366, "table_properties": {"data_size": 1309929, "index_size": 1183, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 389, "raw_key_size": 3517, "raw_average_key_size": 24, "raw_value_size": 1306420, "raw_average_value_size": 9135, "num_data_blocks": 53, "num_entries": 143, "num_filter_entries": 143, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773087205, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "07a37024-7fa0-4f99-ba14-c3f3e461d4f2", "db_session_id": "3WMCG8NE5OKEWG14RA63", "orig_file_number": 48, "seqno_to_time_mapping": "N/A"}} 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773087205729941, "job": 1, "event": "recovery_finished"} 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:5047] Creating manifest 50 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000043.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55b12242ae00 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: DB pointer 0x55b122540000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: ** DB Stats ** 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: L0 1/0 1.25 MB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 223.3 0.01 0.00 1 0.006 0 0 0.0 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: L6 1/0 10.73 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Sum 2/0 11.98 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 223.3 0.01 0.00 1 0.006 0 0 0.0 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 223.3 0.01 0.00 1 0.006 0 0 0.0 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: ** Compaction Stats [default] ** 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 223.3 0.01 0.00 1 0.006 0 0 0.0 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Cumulative compaction: 0.00 GB write, 95.81 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Interval compaction: 0.00 GB write, 95.81 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Block cache BinnedLRUCache@0x55b122429350#2 capacity: 512.00 MB usage: 1.77 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.44 KB,8.34465e-05%) IndexBlock(1,1.33 KB,0.00025332%) Misc(1,0.00 KB,0%) 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.884 INFO:journalctl@ceph.mon.c.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: starting mon.c rank 1 at public addrs [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] at bind addrs [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???) 
e3 preinit fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).mds e1 new map 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).mds e1 print_map 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: e1 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: legacy client fscid: -1 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout: No filesystems configured 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).osd e96 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T20:13:25.885 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:25 vm01 ceph-mon[98877]: mon.c@-1(???).paxosservice(auth 1..25) refresh upgraded, format 0 -> 3 2026-03-09T20:13:27.065 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:26.990Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:27.066 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:26 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:26.991Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:27.625 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: mon.a calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: mon.c calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: mon.b calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: monmap epoch 4 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: last_changed 2026-03-09T20:13:26.430633+0000 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: min_mon_release 19 (squid) 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: election_strategy: 1 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: fsmap 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: overall HEALTH_OK 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 
20:13:27 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: mon.a calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: mon.c calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: mon.b calling monitor election 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: monmap epoch 4 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: last_changed 2026-03-09T20:13:26.430633+0000 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: min_mon_release 19 (squid) 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: election_strategy: 1 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: fsmap 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: overall HEALTH_OK 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:27.625 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:27 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: mon.a calling monitor election 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: mon.c calling monitor election 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: mon.b calling monitor election 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: monmap epoch 4 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: last_changed 2026-03-09T20:13:26.430633+0000 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: created 2026-03-09T19:50:55.405959+0000 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: min_mon_release 19 (squid) 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: election_strategy: 1 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: 1: [v2:192.168.123.101:3301/0,v1:192.168.123.101:6790/0] mon.c 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: fsmap 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-09T20:13:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: overall HEALTH_OK 2026-03-09T20:13:27.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:27.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:27 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[97066]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:13:29.005 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[98877]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.005 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:28 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:28 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:28 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:28 vm06 ceph-mon[81259]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T20:13:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:28 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:29.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:28 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.203 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: Detected new or changed devices on vm01 2026-03-09T20:13:30.203 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: Detected new or changed devices on vm01 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: Reconfiguring mon.a (monmap 
changed)... 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: Reconfiguring daemon mon.a on vm01 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:30.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:30 vm06 ceph-mon[81259]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: Reconfiguring mon.a (monmap changed)... 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: Reconfiguring daemon mon.a on vm01 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: Detected new or changed devices on vm01 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.630 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: Reconfiguring mon.a (monmap changed)... 
2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: Reconfiguring daemon mon.a on vm01 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:30.631 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:30 vm01 ceph-mon[98877]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.037 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:30.740+0000 7f1345fe3640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-09T20:13:31.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring daemon mgr.y on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring mon.c (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring daemon mon.c on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: Reconfiguring daemon osd.0 on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:31 
vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring daemon mgr.y on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring mon.c (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring daemon mon.c on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: Reconfiguring daemon osd.0 on vm01 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:13:31.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:31 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 
09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring daemon mgr.y on vm01 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring mon.c (monmap changed)... 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring daemon mon.c on vm01 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: Reconfiguring daemon osd.0 on vm01 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:13:31.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:31 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring osd.1 (monmap changed)... 
2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring daemon osd.1 on vm01 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring daemon osd.2 on vm01 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: Reconfiguring daemon osd.3 on vm01 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:32.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:32 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.606 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring osd.1 (monmap changed)... 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring daemon osd.1 on vm01 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring daemon osd.2 on vm01 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: Reconfiguring daemon osd.3 on vm01 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:32.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring osd.1 (monmap changed)... 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring daemon osd.1 on vm01 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring daemon osd.2 on vm01 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: Reconfiguring daemon osd.3 on vm01 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", 
"profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T20:13:32.608 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:32 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:32] "GET /metrics HTTP/1.1" 200 34775 "" "Prometheus/2.51.0" 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring rgw.foo.vm01.zyuelz (monmap changed)... 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring mon.b (monmap changed)... 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring daemon mon.b on vm06 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring mgr.x (monmap changed)... 
2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: Reconfiguring daemon mgr.x on vm06 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T20:13:33.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring rgw.foo.vm01.zyuelz (monmap changed)... 
2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring mon.b (monmap changed)... 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring daemon mon.b on vm06 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: Reconfiguring daemon mgr.x on vm06 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring rgw.foo.vm01.zyuelz (monmap changed)... 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring mon.b (monmap changed)... 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring daemon mon.b on vm06 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring mgr.x (monmap changed)... 
2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: Reconfiguring daemon mgr.x on vm06 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T20:13:33.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring osd.4 (monmap changed)... 
2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring daemon osd.4 on vm06 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring daemon osd.5 on vm06 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring daemon osd.6 on vm06 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring daemon osd.7 on vm06 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring rgw.foo.vm06.lmiczb (monmap changed)... 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Reconfiguring daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.771 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mds 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nfs 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 
09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all grafana 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all loki 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all promtail 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Finalizing container_image settings 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", 
"who": "mgr"}]': finished 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:13:34.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 
2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: Upgrade: Complete! 
2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.773 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring daemon osd.4 on vm06 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring daemon osd.5 on vm06 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring daemon osd.6 on vm06 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring daemon osd.7 on vm06 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring rgw.foo.vm06.lmiczb (monmap changed)... 
2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Reconfiguring daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mds 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all nfs 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all grafana 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all loki 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all promtail 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Finalizing container_image settings 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", 
"name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: Upgrade: Complete! 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:13:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring daemon osd.4 on vm06 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring daemon osd.5 on vm06 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring osd.6 (monmap changed)... 
2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring daemon osd.6 on vm06 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring daemon osd.7 on vm06 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring rgw.foo.vm06.lmiczb (monmap changed)... 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Reconfiguring daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mon 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mds 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all nfs 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all node-exporter 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all prometheus 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all alertmanager 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all grafana 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all loki 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all promtail 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Finalizing container_image settings 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 
2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:13:34.882 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: Upgrade: Complete! 
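[editor's note] The "Upgrade: Complete!" entries above mark the end of the previous staggered step; the preceding run of "config rm ... container_image" commands is cephadm clearing the temporary per-daemon-type image overrides it set during that step (the same audit trail appears once per mon journal, which is why it repeats for mon.a and mon.c). A minimal sketch, not part of the captured run, of how one could confirm afterwards that no stale overrides were left behind; the test itself uses the longer `cephadm --image ... shell -c ... -k ... --fsid ...` invocation shown elsewhere in this log:

    # Hedged sketch: list any remaining container_image config entries after the
    # upgrade step reports complete; ideally only a default/global entry remains.
    sudo cephadm shell -- ceph config dump | grep container_image || echo "no container_image overrides left"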
2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:34.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:36.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:36.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:36.991Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: 
ts=2026-03-09T20:13:36.992Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:37 vm01 ceph-mon[97066]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T20:13:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:37 vm01 ceph-mon[98877]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T20:13:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:37 vm06 ceph-mon[81259]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T20:13:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:39 vm06 ceph-mon[81259]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T20:13:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:39 vm01 ceph-mon[97066]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T20:13:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:39 vm01 ceph-mon[98877]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T20:13:41.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:41 vm06 ceph-mon[81259]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:41.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:41 vm01 ceph-mon[97066]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:41.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:41 vm01 ceph-mon[98877]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:42] "GET /metrics HTTP/1.1" 200 37586 "" "Prometheus/2.51.0" 2026-03-09T20:13:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:43 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:43 vm06 ceph-mon[81259]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:13:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:43 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:43 vm01 ceph-mon[97066]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:13:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 
09 20:13:43 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:43 vm01 ceph-mon[98877]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T20:13:44.589 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:13:45.312 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:13:45.312 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (13m) 17s ago 20m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:13:45.312 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (86s) 23s ago 20m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:13:45.312 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (93s) 17s ago 20m 47.9M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (98s) 23s ago 21m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (10m) 17s ago 22m 542M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (30s) 17s ago 22m 45.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (66s) 23s ago 22m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (19s) 17s ago 22m 24.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (13m) 17s ago 20m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (13m) 23s ago 20m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (21m) 17s ago 21m 56.1M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (21m) 17s ago 21m 54.8M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (21m) 17s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (21m) 17s ago 21m 55.3M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (21m) 23s ago 21m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (21m) 23s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (21m) 23s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:13:45.313 
INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (20m) 23s ago 20m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (100s) 23s ago 20m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (20m) 17s ago 20m 96.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:13:45.313 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (20m) 23s ago 20m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:13:45.383 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 1'"'"'' 2026-03-09T20:13:45.611 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:45 vm01 ceph-mon[97066]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:45.611 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:45 vm01 ceph-mon[98877]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:45 vm06 ceph-mon[81259]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:46.070 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:13:46.377 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | keys'"'"' | grep $sha1' 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[98877]: from='client.44124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[98877]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/3268136115' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[97066]: from='client.44124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[97066]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.551 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:46 vm01 ceph-mon[97066]: from='client.? 
192.168.123.101:0/3268136115' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:46.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:46 vm06 ceph-mon[81259]: from='client.44124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:46 vm06 ceph-mon[81259]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:46.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:46 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/3268136115' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:46.992 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T20:13:47.322 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:46.991Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:47.322 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:46 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:46.995Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:47.722 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 5'"'"'' 2026-03-09T20:13:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:47 vm01 ceph-mon[98877]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:47.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:47 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/2580141886' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:47.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:47 vm01 ceph-mon[97066]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:47.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:47 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/2580141886' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:48.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:47 vm06 ceph-mon[81259]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T20:13:48.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:47 vm06 ceph-mon[81259]: from='client.? 
192.168.123.101:0/2580141886' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:48.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:48 vm01 ceph-mon[97066]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 109 op/s 2026-03-09T20:13:48.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:48 vm01 ceph-mon[98877]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 109 op/s 2026-03-09T20:13:49.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:48 vm06 ceph-mon[81259]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 109 op/s 2026-03-09T20:13:49.303 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:13:49.388 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:13:49.686 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:49 vm01 ceph-mon[98877]: from='client.54121 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:49.686 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:49 vm01 ceph-mon[97066]: from='client.54121 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:13:49.958 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:49.994 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:49 vm06 ceph-mon[81259]: from='client.54121 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:50.027 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:13:50.593 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:13:50.680 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image 
quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2' 2026-03-09T20:13:50.843 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[97066]: from='client.44148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:50.843 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[97066]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:50.843 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:50.843 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/951783378' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:50.844 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[98877]: from='client.44148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:50.844 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[98877]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:50.844 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:50.844 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:50 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/951783378' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:50 vm06 ceph-mon[81259]: from='client.44148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:50 vm06 ceph-mon[81259]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:50 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:13:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:50 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/951783378' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:13:51.588 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:51.686 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:13:51.813 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[97066]: from='client.54139 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:51.813 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:51.813 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:51.813 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:51.813 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:51.814 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[98877]: from='client.54139 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:51.814 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:51.814 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:51.814 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:51.814 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:51 vm06 ceph-mon[81259]: from='client.54139 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:13:52.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:52.369 INFO:teuthology.orchestra.run.vm01.stdout:true 
2026-03-09T20:13:52.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:13:52] "GET /metrics HTTP/1.1" 200 37588 "" "Prometheus/2.51.0" 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (13m) 25s ago 20m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (93s) 31s ago 20m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (101s) 25s ago 20m 47.9M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (105s) 31s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (10m) 25s ago 22m 542M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (37s) 25s ago 22m 45.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (74s) 31s ago 22m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (27s) 25s ago 22m 24.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (13m) 25s ago 20m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (13m) 31s ago 20m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (21m) 25s ago 21m 56.1M 4096M 17.2.0 e1d6a67b021e 8bd5f42493be 2026-03-09T20:13:52.863 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (21m) 25s ago 21m 54.8M 4096M 17.2.0 e1d6a67b021e c1c4156caab8 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (21m) 25s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (21m) 25s ago 21m 55.3M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (21m) 31s ago 21m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (21m) 31s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (21m) 31s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (21m) 31s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (107s) 31s ago 20m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (20m) 25s ago 20m 96.6M - 17.2.0 e1d6a67b021e 86ff044460d1 
2026-03-09T20:13:52.864 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (20m) 31s ago 20m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:13:53.142 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:53.142 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:13:53.142 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 10, 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:13:53.143 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mgr 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.325 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: Upgrade: osd.0 is safe to restart 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='client.54145 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mgr 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Setting 
container_image for all mon 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: Upgrade: osd.0 is safe to restart 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='client.54145 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:53.326 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:53 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) osd. 
Upgrade limited to 2 daemons (2 remaining).", 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "0/8 daemons upgraded", 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:13:53.387 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:13:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 0 B/s wr, 60 op/s 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mgr 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all 
crash 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: Upgrade: osd.0 is safe to restart 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='client.54145 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T20:13:53.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:53 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:13:53.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:53 vm01 systemd[1]: Stopping Ceph osd.0 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:13:53.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T20:13:53.559+0000 7fc48646b700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:13:53.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T20:13:53.559+0000 7fc48646b700 -1 osd.0 96 *** Got signal Terminated *** 2026-03-09T20:13:53.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:53 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[56947]: 2026-03-09T20:13:53.559+0000 7fc48646b700 -1 osd.0 96 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: Upgrade: Updating osd.0 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: Deploying daemon osd.0 on vm01 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: from='client.54148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: from='client.? 
192.168.123.101:0/653824403' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: from='client.54163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[98877]: osd.0 marked itself down and dead 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: Upgrade: Updating osd.0 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: Deploying daemon osd.0 on vm01 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: from='client.54148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/653824403' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: from='client.54163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.450 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:54 vm01 ceph-mon[97066]: osd.0 marked itself down and dead 2026-03-09T20:13:54.451 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[103987]: 2026-03-09 20:13:54.185219973 +0000 UTC m=+0.662264860 container died 8bd5f42493becfede0a1dab43736d380d8851ae4f6835012477e4a4cb55e4354 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, GIT_CLEAN=True, architecture=x86_64, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, GIT_BRANCH=HEAD, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, RELEASE=HEAD, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, vcs-type=git, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux ) 2026-03-09T20:13:54.451 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[103987]: 2026-03-09 20:13:54.291436932 +0000 UTC m=+0.768481819 container remove 8bd5f42493becfede0a1dab43736d380d8851ae4f6835012477e4a4cb55e4354 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, GIT_BRANCH=HEAD, architecture=x86_64, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, name=centos-stream, maintainer=Guillaume Abrioux , RELEASE=HEAD, release=754, GIT_CLEAN=True) 2026-03-09T20:13:54.451 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 bash[103987]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: Upgrade: Updating osd.0 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: Deploying daemon osd.0 on vm01 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: from='client.54148 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: from='client.? 
192.168.123.101:0/653824403' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: from='client.54163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:13:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:54 vm06 ceph-mon[81259]: osd.0 marked itself down and dead 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.450735376 +0000 UTC m=+0.018637576 container create 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid) 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.493359417 +0000 UTC m=+0.061261617 container init 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2) 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.497654175 +0000 UTC m=+0.065556375 container start 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 
vm01 podman[104052]: 2026-03-09 20:13:54.498766457 +0000 UTC m=+0.066668657 container attach 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.443326157 +0000 UTC m=+0.011228368 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 conmon[104063]: conmon 2d4cbd70d0f2d9f972b6 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354.scope/container/memory.events 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.642963294 +0000 UTC m=+0.210865495 container died 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104052]: 2026-03-09 20:13:54.66451865 +0000 UTC m=+0.232420850 container remove 2d4cbd70d0f2d9f972b6cbde01e1521bddc7c416706a2126e9171e89f6c92354 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 
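The container churn above (the old 17.2.0 osd.0 container removed, then a short-lived *-osd-0-deactivate container run from the target image) is cephadm redeploying the daemon as part of the limited upgrade batch. Outside of an upgrade, a single daemon can be moved to a specific image in much the same way; a sketch, assuming the generic 'orch daemon redeploy' form rather than anything specific to this run:

    ceph orch daemon redeploy osd.0 quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df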
2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.0.service: Deactivated successfully. 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 systemd[1]: Stopped Ceph osd.0 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:54.751 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.0.service: Consumed 7.040s CPU time. 2026-03-09T20:13:55.120 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 systemd[1]: Starting Ceph osd.0 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:13:55.120 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:54 vm01 podman[104152]: 2026-03-09 20:13:54.972198823 +0000 UTC m=+0.018979625 container create 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:13:55.120 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:55.013842759 +0000 UTC m=+0.060623571 container init 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0) 2026-03-09T20:13:55.120 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:55.019209274 +0000 UTC m=+0.065990086 container start 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:55.023139621 +0000 UTC m=+0.069920443 container attach 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid) 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:54.964287666 +0000 UTC m=+0.011068488 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.121 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[97066]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T20:13:55.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:13:55.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[97066]: osdmap e97: 8 total, 7 up, 8 in 2026-03-09T20:13:55.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[98877]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T20:13:55.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:13:55.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:55 vm01 ceph-mon[98877]: osdmap e97: 8 total, 7 up, 8 in 2026-03-09T20:13:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:55 vm06 ceph-mon[81259]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T20:13:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:55 vm06 ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 
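With osd.0 stopped so it can be redeployed on the new image, the monitors briefly report OSD_DOWN; the run treats this as an expected transient. A small sketch of how the recovery could be confirmed by hand once the daemon is back, using the same commands the polling loop issues (the --daemon-type flag spelling and the jq filters are illustrative assumptions):

    ceph orch ps --daemon-type osd
    ceph versions | jq '.osd'
    ceph orch upgrade status | jq -r '.progress'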
2026-03-09T20:13:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:55 vm06 ceph-mon[81259]: osdmap e97: 8 total, 7 up, 8 in 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-13610d26-e355-4cfb-b355-b3ba8c5192b0/osd-block-f103ea3b-bf46-454a-98e8-0d7aaed22607 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T20:13:55.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-13610d26-e355-4cfb-b355-b3ba8c5192b0/osd-block-f103ea3b-bf46-454a-98e8-0d7aaed22607 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[97066]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/ln -snf /dev/ceph-13610d26-e355-4cfb-b355-b3ba8c5192b0/osd-block-f103ea3b-bf46-454a-98e8-0d7aaed22607 /var/lib/ceph/osd/ceph-0/block 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/ln -snf /dev/ceph-13610d26-e355-4cfb-b355-b3ba8c5192b0/osd-block-f103ea3b-bf46-454a-98e8-0d7aaed22607 /var/lib/ceph/osd/ceph-0/block 2026-03-09T20:13:56.380 
INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate[104163]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 bash[104152]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 conmon[104163]: conmon 9edc193250e001055d65 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d.scope/container/memory.events 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:55.95891624 +0000 UTC m=+1.005697052 container died 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:55 vm01 podman[104152]: 2026-03-09 20:13:55.979968824 +0000 UTC m=+1.026749626 container remove 9edc193250e001055d657966878558b2ee75189b14ce789143f4242f6860118d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release 
Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 podman[104398]: 2026-03-09 20:13:56.065643774 +0000 UTC m=+0.019749957 container create 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 podman[104398]: 2026-03-09 20:13:56.106826588 +0000 UTC m=+0.060932771 container init 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2) 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 podman[104398]: 2026-03-09 20:13:56.111763949 +0000 UTC m=+0.065870132 container start 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 bash[104398]: 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 podman[104398]: 2026-03-09 20:13:56.057630486 +0000 UTC m=+0.011736678 image pull 
654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:13:56.380 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 systemd[1]: Started Ceph osd.0 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[98877]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T20:13:56.380 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.381 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:56 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:56 vm06 ceph-mon[81259]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T20:13:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:56 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:56 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:56.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:56 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:57.102 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:13:56.935+0000 7f855e1f1740 -1 Falling back to public interface 2026-03-09T20:13:57.102 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:56.994Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:57.102 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:13:56 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:13:56.995Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:13:57.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:57 vm01 ceph-mon[97066]: pgmap v23: 161 pgs: 4 active+undersized, 17 stale+active+clean, 6 active+undersized+degraded, 134 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded (3.509%) 2026-03-09T20:13:57.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:57 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 22/627 objects degraded (3.509%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:13:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:57 vm01 ceph-mon[98877]: pgmap v23: 161 pgs: 4 active+undersized, 17 stale+active+clean, 6 active+undersized+degraded, 134 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded 
(3.509%) 2026-03-09T20:13:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:57 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 22/627 objects degraded (3.509%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:13:57.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:57 vm06 ceph-mon[81259]: pgmap v23: 161 pgs: 4 active+undersized, 17 stale+active+clean, 6 active+undersized+degraded, 134 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded (3.509%) 2026-03-09T20:13:57.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:57 vm06 ceph-mon[81259]: Health check failed: Degraded data redundancy: 22/627 objects degraded (3.509%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:13:57.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:13:57.540+0000 7f855e1f1740 -1 osd.0 0 read_superblock omap replica is missing. 2026-03-09T20:13:57.879 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:13:57.553+0000 7f855e1f1740 -1 osd.0 96 log_to_monitors true 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[97066]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[97066]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[98877]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[98877]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:58.580 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:58.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:58 vm06 ceph-mon[81259]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:58 vm06 ceph-mon[81259]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T20:13:58.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:58 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:58.592 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:58 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: pgmap v24: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:13:59 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: pgmap v24: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.625 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: pgmap v24: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 104 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 
ceph-mon[98877]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: from='osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:13:59.626 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:13:59 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:13:59.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:13:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:13:59.869+0000 7f1345fe3640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T20:13:59.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:13:59 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:13:59.827+0000 7f855579b640 -1 osd.0 96 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:14:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:01.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: OSD bench result of 19916.401833 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T20:14:01.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:00 vm06 ceph-mon[81259]: pgmap v26: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:14:01.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: OSD bench result of 19916.401833 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[97066]: pgmap v26: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: OSD bench result of 19916.401833 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T20:14:01.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:00 vm01 ceph-mon[98877]: pgmap v26: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-09T20:14:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:01 vm06 ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:01 vm06 ceph-mon[81259]: osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617] boot 2026-03-09T20:14:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:01 vm06 ceph-mon[81259]: osdmap e100: 8 total, 8 up, 8 in 2026-03-09T20:14:02.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:01 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[97066]: osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617] boot 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[97066]: osdmap e100: 8 total, 8 up, 8 in 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[98877]: osd.0 [v2:192.168.123.101:6802/379916617,v1:192.168.123.101:6803/379916617] boot 2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[98877]: osdmap e100: 8 total, 8 up, 8 in 
2026-03-09T20:14:02.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:01 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T20:14:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:02] "GET /metrics HTTP/1.1" 200 37588 "" "Prometheus/2.51.0" 2026-03-09T20:14:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:02 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:02 vm06 ceph-mon[81259]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T20:14:03.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:02 vm06 ceph-mon[81259]: pgmap v29: 161 pgs: 37 peering, 13 active+undersized, 8 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 30/627 objects degraded (4.785%) 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[97066]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[97066]: pgmap v29: 161 pgs: 37 peering, 13 active+undersized, 8 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 30/627 objects degraded (4.785%) 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[98877]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T20:14:03.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:02 vm01 ceph-mon[98877]: pgmap v29: 161 pgs: 37 peering, 13 active+undersized, 8 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 30/627 objects degraded (4.785%) 2026-03-09T20:14:04.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:03 vm06 ceph-mon[81259]: Health check update: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:04.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:03 vm01 ceph-mon[97066]: Health check update: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:04.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:03 vm01 ceph-mon[98877]: Health check update: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:05.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:04 vm06 ceph-mon[81259]: pgmap v30: 161 pgs: 37 peering, 124 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:05.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:04 vm01 ceph-mon[97066]: pgmap v30: 161 pgs: 37 peering, 124 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T20:14:05.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:04 vm01 ceph-mon[98877]: pgmap v30: 161 pgs: 37 peering, 124 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:06.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:05 vm06 ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded) 2026-03-09T20:14:06.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:05 vm06 ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:14:06.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:05 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:06.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded) 2026-03-09T20:14:06.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:14:06.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:06.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 30/627 objects degraded (4.785%), 8 pgs degraded) 2026-03-09T20:14:06.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:14:06.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:05 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:07.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:07 vm06 ceph-mon[81259]: pgmap v31: 161 pgs: 31 peering, 130 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 654 B/s rd, 0 op/s 2026-03-09T20:14:07.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:07 vm01 ceph-mon[98877]: pgmap v31: 161 pgs: 31 peering, 130 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 654 B/s rd, 0 op/s 2026-03-09T20:14:07.379 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:06.995Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:07.379 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:06 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:06.996Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:07.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:07 vm01 ceph-mon[97066]: pgmap v31: 161 pgs: 31 peering, 130 active+clean; 457 KiB data, 123 MiB 
used, 160 GiB / 160 GiB avail; 654 B/s rd, 0 op/s 2026-03-09T20:14:09.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:09 vm06 ceph-mon[81259]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:09.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:09 vm01 ceph-mon[97066]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:09.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:09 vm01 ceph-mon[98877]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:11.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:11 vm06 ceph-mon[81259]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:14:11.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:11 vm01 ceph-mon[97066]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:14:11.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:11 vm01 ceph-mon[98877]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:14:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:12] "GET /metrics HTTP/1.1" 200 37596 "" "Prometheus/2.51.0" 2026-03-09T20:14:13.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:13 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:13.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:13 vm06 ceph-mon[81259]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1003 B/s rd, 0 op/s 2026-03-09T20:14:13.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:13 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:13.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:13 vm01 ceph-mon[97066]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1003 B/s rd, 0 op/s 2026-03-09T20:14:13.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:13 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:13.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:13 vm01 ceph-mon[98877]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1003 B/s rd, 0 op/s 2026-03-09T20:14:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:15 vm01 ceph-mon[97066]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:15 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:15 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 
09 20:14:15 vm01 ceph-mon[98877]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:15 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:15 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:15 vm06 ceph-mon[81259]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:15 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:15 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: Upgrade: osd.1 is safe to restart 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: Upgrade: Updating osd.1 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:14:16.321 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: Deploying daemon osd.1 on vm01 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[97066]: osd.1 marked itself down and dead 2026-03-09T20:14:16.322 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 systemd[1]: Stopping Ceph osd.1 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:14:16.322 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T20:14:16.143+0000 7f63ba6aa700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:14:16.322 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T20:14:16.143+0000 7f63ba6aa700 -1 osd.1 101 *** Got signal Terminated *** 2026-03-09T20:14:16.322 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[59666]: 2026-03-09T20:14:16.144+0000 7f63ba6aa700 -1 osd.1 101 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: Upgrade: osd.1 is safe to restart 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: Upgrade: Updating osd.1 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: Deploying daemon osd.1 on vm01 2026-03-09T20:14:16.322 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:16 vm01 ceph-mon[98877]: osd.1 marked itself down and dead 2026-03-09T20:14:16.585 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108216]: 2026-03-09 20:14:16.320316243 +0000 UTC m=+0.188023950 container died c1c4156caab8bd9b3960a296417810561717675946dd332ddf3f5b2a24046cde (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, ceph=True, io.buildah.version=1.19.8, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, RELEASE=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, architecture=x86_64, com.redhat.component=centos-stream-container, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., version=8, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108216]: 2026-03-09 20:14:16.346627762 +0000 UTC m=+0.214335469 container remove c1c4156caab8bd9b3960a296417810561717675946dd332ddf3f5b2a24046cde (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, GIT_BRANCH=HEAD, maintainer=Guillaume Abrioux , version=8, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, io.openshift.expose-services=, release=754, ceph=True, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream) 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 bash[108216]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.493134051 +0000 UTC m=+0.016056274 container create 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.534445225 +0000 UTC m=+0.057367468 container init 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.537699446 +0000 UTC m=+0.060621669 container start 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:14:16.586 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.540190027 +0000 UTC m=+0.063112250 container attach 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: Upgrade: osd.1 is safe to restart 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: Upgrade: Updating osd.1 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: Deploying daemon osd.1 on vm01 2026-03-09T20:14:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:16 vm06 ceph-mon[81259]: osd.1 marked itself down and dead 2026-03-09T20:14:16.878 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.486625349 +0000 UTC m=+0.009547582 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 conmon[108294]: conmon 10a987c65c164a75e5b8 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3.scope/container/memory.events 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.688841994 +0000 UTC m=+0.211764217 container died 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 podman[108283]: 2026-03-09 20:14:16.80261713 +0000 UTC m=+0.325539353 container remove 10a987c65c164a75e5b875667fed0dec1a245e37bf37792526bf21cb63b27be3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.1.service: Deactivated successfully. 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 systemd[1]: Stopped Ceph osd.1 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:14:16.879 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:16 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.1.service: Consumed 7.456s CPU time. 2026-03-09T20:14:17.159 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:16 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:16.998Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:17.159 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:17.001Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:17.159 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 systemd[1]: Starting Ceph osd.1 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:14:17.159 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 podman[108385]: 2026-03-09 20:14:17.132679647 +0000 UTC m=+0.018018357 container create fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.41.3) 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 podman[108385]: 2026-03-09 20:14:17.182442849 +0000 UTC m=+0.067781559 container init fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 podman[108385]: 2026-03-09 20:14:17.186007172 +0000 UTC m=+0.071345882 container start fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid) 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 podman[108385]: 2026-03-09 20:14:17.190129076 +0000 UTC m=+0.075467797 container attach fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 podman[108385]: 2026-03-09 20:14:17.124518611 +0000 UTC m=+0.009857331 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:17.564 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:17 vm06 ceph-mon[81259]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:17 vm06 ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:17 vm06 ceph-mon[81259]: osdmap e102: 8 total, 7 up, 8 in 2026-03-09T20:14:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[97066]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[97066]: osdmap e102: 8 total, 7 up, 8 in 2026-03-09T20:14:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[98877]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:17 vm01 ceph-mon[98877]: osdmap e102: 8 total, 7 up, 8 in 2026-03-09T20:14:18.219 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:14:18.219 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:14:18.219 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/ceph-authtool --gen-print-key 
2026-03-09T20:14:18.219 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f76e561-0a22-4235-9125-5a1abf902c43/osd-block-52b44b6f-8ac7-4e25-969b-53f2808d1cb7 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:17 vm01 bash[108385]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f76e561-0a22-4235-9125-5a1abf902c43/osd-block-52b44b6f-8ac7-4e25-969b-53f2808d1cb7 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/ln -snf /dev/ceph-8f76e561-0a22-4235-9125-5a1abf902c43/osd-block-52b44b6f-8ac7-4e25-969b-53f2808d1cb7 /var/lib/ceph/osd/ceph-1/block 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108385]: Running command: /usr/bin/ln -snf /dev/ceph-8f76e561-0a22-4235-9125-5a1abf902c43/osd-block-52b44b6f-8ac7-4e25-969b-53f2808d1cb7 /var/lib/ceph/osd/ceph-1/block 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108385]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108385]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108385]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate[108397]: --> ceph-volume lvm activate successful for 
osd ID: 1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108385]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108385]: 2026-03-09 20:14:18.065870977 +0000 UTC m=+0.951209687 container died fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108385]: 2026-03-09 20:14:18.089213127 +0000 UTC m=+0.974551837 container remove fc6e32cde902e62f9dfdd965a0077bfe4ce3cf1b4742b5d97df54f9de5898abc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-activate, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-09T20:14:18.220 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108643]: 2026-03-09 20:14:18.219162858 +0000 UTC m=+0.042484281 container create 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:14:18.342 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:14:17 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=infra.usagestats t=2026-03-09T20:14:17.945300229Z level=info msg="Usage stats are ready to report" 2026-03-09T20:14:18.565 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[97066]: osdmap e103: 8 total, 7 
up, 8 in 2026-03-09T20:14:18.566 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[98877]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T20:14:18.566 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[98877]: pgmap v39: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:18.566 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.566 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.566 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.568 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108643]: 2026-03-09 20:14:18.186079755 +0000 UTC m=+0.009401187 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:18.568 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108643]: 2026-03-09 20:14:18.342539134 +0000 UTC m=+0.165860557 container init 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:14:18.568 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 podman[108643]: 2026-03-09 20:14:18.345784688 +0000 UTC m=+0.169106102 container start 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T20:14:18.568 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 bash[108643]: 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 2026-03-09T20:14:18.568 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 systemd[1]: Started Ceph osd.1 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:14:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:18 vm06 ceph-mon[81259]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T20:14:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:18 vm06 ceph-mon[81259]: pgmap v39: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:18 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:18 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:18 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[97066]: pgmap v39: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:18.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:18.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:18 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:19.285 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:18 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:14:18.926+0000 7feac9c9b740 -1 Falling back to public interface 2026-03-09T20:14:20.378 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:14:20.063+0000 7feac9c9b740 -1 osd.1 0 read_superblock omap replica is missing. 
2026-03-09T20:14:20.378 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:14:20.122+0000 7feac9c9b740 -1 osd.1 101 log_to_monitors true 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: pgmap v40: 161 pgs: 4 active+undersized, 20 stale+active+clean, 2 active+undersized+degraded, 135 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%) 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", "id": [3, 6, 5, 0]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 
ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.881 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: pgmap v40: 161 pgs: 4 active+undersized, 20 stale+active+clean, 2 active+undersized+degraded, 135 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%) 2026-03-09T20:14:20.882 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", 
"id": [3, 6, 5, 0]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:20.883 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:20 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: pgmap v40: 161 pgs: 4 active+undersized, 20 stale+active+clean, 2 active+undersized+degraded, 135 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%) 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 
ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", "id": [3, 6, 5, 0]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:20 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:21.296 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:14:20 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:14:20.899+0000 7feac1a46640 -1 osd.1 101 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: Health check failed: Degraded data redundancy: 7/627 objects degraded (1.116%), 2 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]': finished 2026-03-09T20:14:22.092 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", "id": [3, 6, 5, 0]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T20:14:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", 
"name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 7/627 objects degraded (1.116%), 2 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", "id": [3, 6, 5, 0]}]': finished 2026-03-09T20:14:22.129 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:22.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"client.nfs"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.130 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 7/627 objects degraded (1.116%), 2 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [7, 2]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1c", "id": [7, 1]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.7", "id": [3, 1]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.8", "id": [3, 0]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.10", "id": [5, 6]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.12", "id": [7, 1]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.13", "id": [3, 1]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "6.19", "id": [3, 6, 5, 0]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.130 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: 
dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:22.131 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:22] "GET /metrics HTTP/1.1" 200 37661 "" "Prometheus/2.51.0" 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: Upgrade: Complete! 
2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655] boot 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: osdmap e105: 8 total, 8 up, 8 in 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[97066]: pgmap v43: 161 pgs: 5 peering, 40 active+undersized, 21 active+undersized+degraded, 95 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: Upgrade: Complete! 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655] boot 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: osdmap e105: 8 total, 8 up, 8 in 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:14:22.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:22 vm01 ceph-mon[98877]: pgmap v43: 161 pgs: 5 peering, 40 active+undersized, 21 active+undersized+degraded, 95 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: Upgrade: Complete! 
2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: osd.1 [v2:192.168.123.101:6810/271418655,v1:192.168.123.101:6811/271418655] boot 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: osdmap e105: 8 total, 8 up, 8 in 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T20:14:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:22 vm06 ceph-mon[81259]: pgmap v43: 161 pgs: 5 peering, 40 active+undersized, 21 active+undersized+degraded, 95 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-09T20:14:23.696 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:14:23.922 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:23 vm01 ceph-mon[97066]: Health check failed: Reduced data availability: 1 pg inactive, 3 pgs peering (PG_AVAILABILITY) 2026-03-09T20:14:23.922 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:23 vm01 ceph-mon[97066]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T20:14:23.922 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:23 vm01 ceph-mon[98877]: Health check failed: Reduced data availability: 1 pg inactive, 3 pgs peering (PG_AVAILABILITY) 2026-03-09T20:14:23.922 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:23 vm01 ceph-mon[98877]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T20:14:24.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:23 vm06 ceph-mon[81259]: Health check failed: Reduced data availability: 1 pg inactive, 3 pgs peering (PG_AVAILABILITY) 2026-03-09T20:14:24.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:23 vm06 ceph-mon[81259]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (14m) 4s ago 21m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (2m) 62s ago 20m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 4s ago 20m 50.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (2m) 62s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (10m) 4s ago 23m 553M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running 
(69s) 4s ago 23m 49.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (105s) 62s ago 22m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (58s) 4s ago 22m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (14m) 4s ago 21m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (14m) 62s ago 21m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (28s) 4s ago 22m 68.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (6s) 4s ago 22m 15.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (22m) 4s ago 22m 52.6M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (22m) 4s ago 22m 57.2M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (21m) 62s ago 21m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (21m) 62s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (21m) 62s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (21m) 62s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 62s ago 21m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (20m) 4s ago 20m 96.5M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:14:24.324 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (20m) 62s ago 20m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:14:24.387 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-09T20:14:25.025 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:14:25.063 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 7'"'"'' 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[97066]: from='client.34205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:24 vm01 
ceph-mon[97066]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[97066]: pgmap v46: 161 pgs: 7 peering, 36 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[97066]: from='client.34208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[98877]: from='client.34205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[98877]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T20:14:25.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[98877]: pgmap v46: 161 pgs: 7 peering, 36 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-09T20:14:25.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:24 vm01 ceph-mon[98877]: from='client.34208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:24 vm06 ceph-mon[81259]: from='client.34205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:24 vm06 ceph-mon[81259]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T20:14:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:24 vm06 ceph-mon[81259]: pgmap v46: 161 pgs: 7 peering, 36 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-09T20:14:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:24 vm06 ceph-mon[81259]: from='client.34208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:25.996 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:14:26.056 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:14:26.085 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:25 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/3330908678' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:26.085 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:25 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:26.085 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:25 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/3330908678' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:26.085 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:25 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:25 vm06 ceph-mon[81259]: from='client.? 
192.168.123.101:0/3330908678' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:25 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:14:26.623 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:14:26.679 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:14:26.873 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:26 vm01 ceph-mon[97066]: from='client.34214 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:26.873 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:26 vm01 ceph-mon[97066]: pgmap v47: 161 pgs: 2 peering, 9 active+undersized, 3 active+undersized+degraded, 147 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%); 77 B/s, 0 objects/s recovering 2026-03-09T20:14:27.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:26 vm01 ceph-mon[98877]: from='client.34214 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:27.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:26 vm01 ceph-mon[98877]: pgmap v47: 161 pgs: 2 peering, 9 active+undersized, 3 active+undersized+degraded, 147 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%); 77 B/s, 0 objects/s recovering 2026-03-09T20:14:27.129 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:27.000Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:27.129 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:27.002Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such 
host" 2026-03-09T20:14:27.324 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded 2026-03-09T20:14:27.324 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded 2026-03-09T20:14:27.324 INFO:teuthology.orchestra.run.vm01.stdout: pg 3.8 is active+undersized+degraded, acting [3,7] 2026-03-09T20:14:27.324 INFO:teuthology.orchestra.run.vm01.stdout: pg 3.18 is active+undersized+degraded, acting [3,0] 2026-03-09T20:14:27.324 INFO:teuthology.orchestra.run.vm01.stdout: pg 4.14 is active+undersized+degraded, acting [3,7] 2026-03-09T20:14:27.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:26 vm06 ceph-mon[81259]: from='client.34214 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:27.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:26 vm06 ceph-mon[81259]: pgmap v47: 161 pgs: 2 peering, 9 active+undersized, 3 active+undersized+degraded, 147 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%); 77 B/s, 0 objects/s recovering 2026-03-09T20:14:27.409 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1' 2026-03-09T20:14:27.875 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[97066]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[97066]: Health check update: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[97066]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 3 pgs peering) 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/1126688135' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[98877]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[98877]: Health check update: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[98877]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 3 pgs peering) 2026-03-09T20:14:28.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:27 vm01 ceph-mon[98877]: from='client.? 
192.168.123.101:0/1126688135' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:14:28.307 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:27 vm06 ceph-mon[81259]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:27 vm06 ceph-mon[81259]: Health check update: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:27 vm06 ceph-mon[81259]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 3 pgs peering) 2026-03-09T20:14:28.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:27 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/1126688135' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:14:28.360 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:14:28.961 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (14m) 9s ago 21m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (2m) 67s ago 20m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 9s ago 20m 50.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (2m) 67s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (10m) 9s ago 23m 553M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (74s) 9s ago 23m 49.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (110s) 67s ago 22m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (63s) 9s ago 22m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (14m) 9s ago 21m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (14m) 67s ago 21m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:14:29.415 
INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (33s) 9s ago 22m 68.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (11s) 9s ago 22m 15.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (22m) 9s ago 22m 52.6M 4096M 17.2.0 e1d6a67b021e f24ff2b9e07a 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (22m) 9s ago 22m 57.2M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (22m) 67s ago 22m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (21m) 67s ago 21m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (21m) 67s ago 21m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (21m) 67s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 67s ago 21m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (20m) 9s ago 20m 96.5M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:14:29.415 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (20m) 67s ago 20m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: from='client.34229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: pgmap v48: 161 pgs: 2 peering, 159 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 493 B/s rd, 0 op/s; 65 B/s, 0 objects/s recovering 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded) 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[97066]: 
Cluster is now healthy 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: from='client.34229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: pgmap v48: 161 pgs: 2 peering, 159 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 493 B/s rd, 0 op/s; 65 B/s, 0 objects/s recovering 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded) 2026-03-09T20:14:29.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:29 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:14:29.692 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8, 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:14:29.693 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: from='client.34229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: pgmap v48: 161 pgs: 2 peering, 159 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 493 B/s rd, 0 op/s; 65 B/s, 0 objects/s recovering 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 12/627 objects degraded (1.914%), 3 pgs degraded) 2026-03-09T20:14:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:29 vm06 ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) crash,osd. 
Upgrade limited to 1 daemons (1 remaining).", 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "2/8 daemons upgraded", 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:14:29.944 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='client.44208 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='client.44211 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='client.54214 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mgr 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 
192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/392546381' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='client.44208 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='client.44211 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='client.54214 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.492 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mgr 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.493 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/392546381' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:14:30.493 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:30 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:30.764 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:30 vm01 systemd[1]: Stopping Ceph osd.2 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='client.44208 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='client.44211 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='client.54214 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: Upgrade: Setting 
container_image for all mgr 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/392546381' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T20:14:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:30 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:31.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[62424]: 2026-03-09T20:14:30.762+0000 7efe09d0e700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:14:31.129 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[62424]: 2026-03-09T20:14:30.762+0000 7efe09d0e700 -1 osd.2 107 *** Got signal Terminated *** 2026-03-09T20:14:31.129 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:30 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[62424]: 2026-03-09T20:14:30.762+0000 7efe09d0e700 -1 osd.2 107 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: Upgrade: osd.2 is safe to restart 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: Upgrade: Updating osd.2 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: Deploying daemon osd.2 on vm01 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: from='client.44229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 50 B/s, 0 objects/s recovering 2026-03-09T20:14:31.753 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[97066]: osd.2 marked itself down and dead 2026-03-09T20:14:31.753 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113694]: 2026-03-09 20:14:31.53035433 +0000 UTC m=+0.783378843 container died f24ff2b9e07a73573df1779f2099b03bd6a104247e79bd25e85cdddc27c126d4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, name=centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, vcs-type=git, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, io.openshift.expose-services=, vendor=Red Hat, Inc., architecture=x86_64, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, RELEASE=HEAD, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8) 2026-03-09T20:14:31.753 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113694]: 2026-03-09 20:14:31.549639526 +0000 UTC m=+0.802664049 container remove f24ff2b9e07a73573df1779f2099b03bd6a104247e79bd25e85cdddc27c126d4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, architecture=x86_64, io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, GIT_CLEAN=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, build-date=2022-05-03T08:36:31.336870) 2026-03-09T20:14:31.753 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 bash[113694]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2 2026-03-09T20:14:31.753 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.674928933 +0000 UTC m=+0.016497989 container create e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid) 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mon 2026-03-09T20:14:31.754 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: Upgrade: osd.2 is safe to restart 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: Upgrade: Updating osd.2 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: Deploying daemon osd.2 on vm01 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: from='client.44229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 50 B/s, 0 objects/s recovering 2026-03-09T20:14:31.754 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:31 vm01 ceph-mon[98877]: osd.2 marked itself down and dead 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: Upgrade: osd.2 is safe to restart 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: Upgrade: Updating osd.2 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: Deploying daemon osd.2 on vm01 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: from='client.44229 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 50 B/s, 0 objects/s recovering 2026-03-09T20:14:31.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:31 vm06 ceph-mon[81259]: osd.2 marked itself down and dead 2026-03-09T20:14:32.014 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.75313313 +0000 UTC m=+0.094702176 container init e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, 
FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T20:14:32.014 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.756506364 +0000 UTC m=+0.098075420 container start e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:14:32.014 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.757299068 +0000 UTC m=+0.098868114 container attach e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True) 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.667347073 +0000 UTC m=+0.008916139 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.900409511 +0000 UTC m=+0.241978567 container died e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, 
org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 podman[113759]: 2026-03-09 20:14:31.917947649 +0000 UTC m=+0.259516694 container remove e24fcb6d676a701610863da3d874cc2ccfb7b55d9a132c698aaaf9e83dd77f23 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service: Deactivated successfully. 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service: Unit process 113769 (conmon) remains running after unit stopped. 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service: Unit process 113777 (podman) remains running after unit stopped. 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 systemd[1]: Stopped Ceph osd.2 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:14:32.015 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:31 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service: Consumed 15.592s CPU time, 140.7M memory peak. 2026-03-09T20:14:32.388 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 systemd[1]: Starting Ceph osd.2 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:14:32.388 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 podman[113859]: 2026-03-09 20:14:32.238445202 +0000 UTC m=+0.018806201 container create 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-09T20:14:32.388 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 podman[113859]: 2026-03-09 20:14:32.28480751 +0000 UTC m=+0.065168509 container init 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223) 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 podman[113859]: 2026-03-09 20:14:32.287841178 +0000 UTC m=+0.068202168 container start 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 podman[113859]: 2026-03-09 20:14:32.288785445 +0000 UTC m=+0.069146444 container attach 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_REF=squid) 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 podman[113859]: 2026-03-09 20:14:32.230572256 +0000 UTC m=+0.010933265 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:32.389 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:32 vm06 ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:32 vm06 ceph-mon[81259]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T20:14:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:32] "GET /metrics HTTP/1.1" 200 37661 "" "Prometheus/2.51.0" 2026-03-09T20:14:32.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:32 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:32.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:32 vm01 ceph-mon[97066]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T20:14:32.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:32 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:14:32.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:32 vm01 ceph-mon[98877]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ceph-authtool --gen-print-key 
2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-c4e4c1ab-9551-4547-99c1-fe52185256fc/osd-block-98d1fc31-9ae8-4c8e-bed3-b4c00f6da774 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:32 vm01 bash[113859]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-c4e4c1ab-9551-4547-99c1-fe52185256fc/osd-block-98d1fc31-9ae8-4c8e-bed3-b4c00f6da774 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T20:14:33.256 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/ln -snf /dev/ceph-c4e4c1ab-9551-4547-99c1-fe52185256fc/osd-block-98d1fc31-9ae8-4c8e-bed3-b4c00f6da774 /var/lib/ceph/osd/ceph-2/block 2026-03-09T20:14:33.628 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[113859]: Running command: /usr/bin/ln -snf /dev/ceph-c4e4c1ab-9551-4547-99c1-fe52185256fc/osd-block-98d1fc31-9ae8-4c8e-bed3-b4c00f6da774 /var/lib/ceph/osd/ceph-2/block 2026-03-09T20:14:33.628 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T20:14:33.628 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[113859]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T20:14:33.628 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T20:14:33.628 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[113859]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[113859]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate[113869]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[113859]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[113859]: 2026-03-09 20:14:33.292416911 +0000 UTC m=+1.072777900 container died 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=squid) 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[113859]: 2026-03-09 20:14:33.312460116 +0000 UTC m=+1.092821115 container remove 8fd87b7a6f315588b79ebb62d39f240d16cdb1f52eca7639154d3550609ec77e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-activate, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[114110]: 2026-03-09 20:14:33.393716415 +0000 UTC m=+0.018808836 container create 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[114110]: 2026-03-09 20:14:33.433676389 +0000 UTC m=+0.058768821 container init 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[114110]: 2026-03-09 20:14:33.436678989 +0000 UTC m=+0.061771411 container start 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 bash[114110]: 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 podman[114110]: 2026-03-09 20:14:33.386234061 +0000 UTC m=+0.011326502 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:14:33.629 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:33 vm01 systemd[1]: Started Ceph osd.2 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: pgmap v51: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 994 B/s rd, 0 op/s; 49 B/s, 0 objects/s recovering 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: pgmap v51: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 994 B/s rd, 0 op/s; 49 B/s, 0 objects/s recovering 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: pgmap v51: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 994 B/s rd, 0 op/s; 49 B/s, 0 objects/s recovering 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:34.479 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:34 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:14:34.267+0000 7fbb5c76b740 -1 Falling back to public interface 
2026-03-09T20:14:34.480 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:34 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:34.150Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:34.826 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:34 vm01 ceph-mon[97066]: pgmap v53: 161 pgs: 7 active+undersized, 10 stale+active+clean, 1 active+undersized+degraded, 143 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5/627 objects degraded (0.797%) 2026-03-09T20:14:34.826 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:34 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 5/627 objects degraded (0.797%), 1 pg degraded (PG_DEGRADED) 2026-03-09T20:14:34.826 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:34 vm01 ceph-mon[98877]: pgmap v53: 161 pgs: 7 active+undersized, 10 stale+active+clean, 1 active+undersized+degraded, 143 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5/627 objects degraded (0.797%) 2026-03-09T20:14:34.826 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:34 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 5/627 objects degraded (0.797%), 1 pg degraded (PG_DEGRADED) 2026-03-09T20:14:34.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:34 vm06 ceph-mon[81259]: pgmap v53: 161 pgs: 7 active+undersized, 10 stale+active+clean, 1 active+undersized+degraded, 143 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5/627 objects degraded (0.797%) 2026-03-09T20:14:34.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:34 vm06 ceph-mon[81259]: Health check failed: Degraded data redundancy: 5/627 objects degraded (0.797%), 1 pg degraded (PG_DEGRADED) 2026-03-09T20:14:35.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:34 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:14:34.879+0000 7fbb5c76b740 -1 osd.2 0 read_superblock omap replica is missing. 
2026-03-09T20:14:35.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:34 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:14:34.919+0000 7fbb5c76b740 -1 osd.2 107 log_to_monitors true 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.262 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:14:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:14:36.204+0000 7fbb54516640 -1 osd.2 107 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.133 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:37.002Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:37.133 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:37.003Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: pgmap v54: 161 pgs: 27 active+undersized, 3 stale+active+clean, 14 active+undersized+degraded, 117 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 
09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 
2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.593 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: pgmap v54: 161 pgs: 27 active+undersized, 3 stale+active+clean, 14 active+undersized+degraded, 117 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T20:14:37.642 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 
ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.642 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: pgmap v54: 161 pgs: 27 active+undersized, 3 stale+active+clean, 14 active+undersized+degraded, 117 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='osd.2 
[v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:14:37.643 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.643 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:37.644 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:37.644 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:14:37.644 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 
20:14:38 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: Upgrade: Complete! 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109] boot 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:14:38.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:38 vm06 ceph-mon[81259]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: Upgrade: Complete! 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:38.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109] boot 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[98877]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: Upgrade: Finalizing container_image settings 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: Upgrade: Complete! 
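Note (illustrative sketch, not part of the captured output): with the mon/mgr phase of the staggered upgrade reported "Complete!" above, the harness next drives the verification and the crash/osd phase. The sequence below is reconstructed from the commands visible further down in this log; they run inside `cephadm shell`, and `$sha1` is the build hash exported into the shell with `-e sha1=...`.

# check daemon inventory and the mixed version state, then start the next stage
ceph orch ps                                    # list daemons with versions/images
ceph versions | jq -e '.osd | length == 2'      # expect exactly 2 distinct OSD versions
ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
ceph orch upgrade status
ceph health detail
ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
# poll until the staggered upgrade finishes or reports an error
while ceph orch upgrade status | jq '.in_progress' | grep true \
      && ! ceph orch upgrade status | jq '.message' | grep Error; do
    ceph orch ps; ceph versions; ceph orch upgrade status; sleep 30
done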
2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: osd.2 [v2:192.168.123.101:6818/1213399109,v1:192.168.123.101:6819/1213399109] boot 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T20:14:38.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:38 vm01 ceph-mon[97066]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T20:14:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:39 vm06 ceph-mon[81259]: pgmap v57: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 57/627 objects degraded (9.091%) 2026-03-09T20:14:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:39 vm01 ceph-mon[98877]: pgmap v57: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 57/627 objects degraded (9.091%) 2026-03-09T20:14:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:39 vm01 ceph-mon[97066]: pgmap v57: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 57/627 objects degraded (9.091%) 2026-03-09T20:14:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:40 vm06 ceph-mon[81259]: Health check update: Degraded data redundancy: 57/627 objects degraded (9.091%), 15 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:40 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:40 vm01 ceph-mon[97066]: Health check update: Degraded data redundancy: 57/627 objects degraded (9.091%), 15 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:40 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:40 vm01 ceph-mon[98877]: Health check update: Degraded data redundancy: 57/627 objects degraded (9.091%), 15 pgs degraded (PG_DEGRADED) 2026-03-09T20:14:40.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:40 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:41.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:41 vm06 ceph-mon[81259]: pgmap v59: 161 pgs: 27 active+undersized, 14 active+undersized+degraded, 120 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:41.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:41 vm01 ceph-mon[97066]: pgmap v59: 161 pgs: 27 active+undersized, 14 active+undersized+degraded, 120 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 
op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:41.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:41 vm01 ceph-mon[98877]: pgmap v59: 161 pgs: 27 active+undersized, 14 active+undersized+degraded, 120 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 52/627 objects degraded (8.293%) 2026-03-09T20:14:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:42 vm06 ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 52/627 objects degraded (8.293%), 14 pgs degraded) 2026-03-09T20:14:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:42 vm06 ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:14:42.606 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:42 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 52/627 objects degraded (8.293%), 14 pgs degraded) 2026-03-09T20:14:42.606 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:42 vm01 ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:14:42.606 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:42 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 52/627 objects degraded (8.293%), 14 pgs degraded) 2026-03-09T20:14:42.606 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:42 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:14:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:42] "GET /metrics HTTP/1.1" 200 37741 "" "Prometheus/2.51.0" 2026-03-09T20:14:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:43 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:43 vm01 ceph-mon[97066]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T20:14:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:43 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:43 vm01 ceph-mon[98877]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T20:14:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:43 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:43 vm06 ceph-mon[81259]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T20:14:44.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:44 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:44.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:44.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:44 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:44.150Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:45 vm01 ceph-mon[97066]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 647 B/s rd, 0 op/s 2026-03-09T20:14:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:45 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:45 vm01 ceph-mon[98877]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 647 B/s rd, 0 op/s 2026-03-09T20:14:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:45 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:45 vm06 ceph-mon[81259]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 647 B/s rd, 0 op/s 2026-03-09T20:14:45.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:45 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:47.362 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:47.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:47.362 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:47.003Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:47.362 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:47 vm01 ceph-mon[97066]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T20:14:47.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:47 vm01 ceph-mon[98877]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T20:14:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:47 vm06 ceph-mon[81259]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T20:14:49.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:49 vm01 ceph-mon[97066]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:14:49.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:49 vm01 ceph-mon[98877]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 
GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:14:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:49 vm06 ceph-mon[81259]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:14:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:51 vm06 ceph-mon[81259]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T20:14:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:51 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[97066]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[98877]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:14:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:14:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:14:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:14:52] "GET /metrics HTTP/1.1" 200 37748 "" "Prometheus/2.51.0" 2026-03-09T20:14:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:53 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:53 vm06 ceph-mon[81259]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:53 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:53 vm01 ceph-mon[97066]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:53 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:14:53.634 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:53 vm01 ceph-mon[98877]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 
160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:54.628 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:54 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:54.150Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:55 vm06 ceph-mon[81259]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:55 vm01 ceph-mon[97066]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:55 vm01 ceph-mon[98877]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:57.312 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:57.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:57.312 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:14:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:14:57.004Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:14:57.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:57 vm06 ceph-mon[81259]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:57.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:57 vm01 ceph-mon[97066]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:57.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:57 vm01 ceph-mon[98877]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:14:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:14:59 vm06 ceph-mon[81259]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:59.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:14:59 vm01 ceph-mon[97066]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:14:59.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:14:59 vm01 ceph-mon[98877]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:00.244 DEBUG:teuthology.orchestra.run.vm01:> sudo 
/home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (15m) 26s ago 21m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (2m) 99s ago 21m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 26s ago 21m 50.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (2m) 99s ago 23m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (11m) 26s ago 24m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (106s) 26s ago 24m 52.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (2m) 99s ago 23m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (95s) 26s ago 23m 42.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (15m) 26s ago 21m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (15m) 99s ago 21m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (64s) 26s ago 23m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (42s) 26s ago 22m 47.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (27s) 26s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (22m) 26s ago 22m 57.7M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (22m) 99s ago 22m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (22m) 99s ago 22m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (22m) 99s ago 22m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (22m) 99s ago 22m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (2m) 99s ago 21m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (21m) 26s ago 21m 96.6M - 17.2.0 
e1d6a67b021e 86ff044460d1 2026-03-09T20:15:00.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (21m) 99s ago 21m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:15:00.996 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-09T20:15:01.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:01 vm01 ceph-mon[97066]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:01.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:01 vm01 ceph-mon[97066]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:01.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:01 vm01 ceph-mon[98877]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:01.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:01 vm01 ceph-mon[98877]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:01.691 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:15:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:01 vm06 ceph-mon[81259]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:01 vm06 ceph-mon[81259]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:01.847 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 8'"'"'' 2026-03-09T20:15:02.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:02 vm01 ceph-mon[97066]: from='client.54241 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:02.607 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:02 vm01 ceph-mon[97066]: from='client.? 192.168.123.101:0/2675094467' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:02.607 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:02 vm01 ceph-mon[98877]: from='client.54241 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:02.607 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:02 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/2675094467' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:02 vm06 ceph-mon[81259]: from='client.54241 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:02.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:02 vm06 ceph-mon[81259]: from='client.? 
192.168.123.101:0/2675094467' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:02] "GET /metrics HTTP/1.1" 200 37748 "" "Prometheus/2.51.0" 2026-03-09T20:15:03.011 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:15:03.060 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:15:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[97066]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[97066]: from='client.34256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:03.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:03.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[98877]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:03.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:03 vm01 ceph-mon[98877]: from='client.34256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:15:03.729 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:15:03.810 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:15:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:03 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:03.842 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:03 vm06 ceph-mon[81259]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:03 vm06 ceph-mon[81259]: from='client.34256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:04.569 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:04 vm01 ceph-mon[97066]: from='client.54256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:04.569 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:04 vm01 ceph-mon[97066]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:04.569 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:04 vm01 ceph-mon[98877]: from='client.54256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:04.569 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:04 vm01 ceph-mon[98877]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:04.591 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:15:04.648 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd' 2026-03-09T20:15:04.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:04 vm06 ceph-mon[81259]: from='client.54256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:04.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:04 vm06 ceph-mon[81259]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:05.572 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:05.625 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:15:05.820 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[97066]: from='client.? 
192.168.123.101:0/3495567319' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:15:05.820 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:05.820 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[97066]: from='client.54265 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:05.821 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/3495567319' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:15:05.821 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:05.821 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:05 vm01 ceph-mon[98877]: from='client.54265 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:05 vm06 ceph-mon[81259]: from='client.? 192.168.123.101:0/3495567319' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:15:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:05 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:05 vm06 ceph-mon[81259]: from='client.54265 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:06.285 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 
ceph-mon[98877]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mgr 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all mon 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: Upgrade: osd.3 is safe to restart 2026-03-09T20:15:06.796 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[98877]: from='client.34277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mgr 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:15:06.798 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: Upgrade: osd.3 is safe to restart 2026-03-09T20:15:06.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:06 vm01 ceph-mon[97066]: from='client.34277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (15m) 31s ago 21m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (2m) 104s ago 21m 78.9M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (2m) 31s ago 21m 50.2M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (2m) 104s ago 23m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (11m) 31s ago 24m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (111s) 31s ago 24m 52.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (2m) 104s ago 23m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (101s) 31s ago 23m 42.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (15m) 31s ago 21m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (15m) 104s ago 21m 10.2M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (70s) 31s ago 23m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (48s) 31s ago 23m 47.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 
2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (33s) 31s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (22m) 31s ago 22m 57.7M 4096M 17.2.0 e1d6a67b021e 97e61b18d2de 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (22m) 104s ago 22m 56.1M 4096M 17.2.0 e1d6a67b021e 8eb62b7332f0 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (22m) 104s ago 22m 54.5M 4096M 17.2.0 e1d6a67b021e cb05c08dd731 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (22m) 104s ago 22m 51.1M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (22m) 104s ago 22m 55.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (3m) 104s ago 21m 52.2M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (21m) 31s ago 21m 96.6M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:15:06.799 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (21m) 104s ago 21m 95.3M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mgr 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:15:06.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:06.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T20:15:06.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: Upgrade: osd.3 is safe to restart 2026-03-09T20:15:06.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:06 vm06 ceph-mon[81259]: from='client.34277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:07.057 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:07.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:07.057 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:07.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:07.118 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7, 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:15:07.119 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:15:07.371 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:15:07.371 INFO:teuthology.orchestra.run.vm01.stdout: 
"target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:15:07.371 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "3/8 daemons upgraded", 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:15:07.372 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:15:07.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:07 vm01 systemd[1]: Stopping Ceph osd.3 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: Upgrade: Updating osd.3 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: Deploying daemon osd.3 on vm01 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='client.44289 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='client.? 
192.168.123.101:0/2241735353' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:08.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:07 vm06 ceph-mon[81259]: from='client.34295 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.128 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T20:15:07.637+0000 7f955e695700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:15:08.128 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T20:15:07.637+0000 7f955e695700 -1 osd.3 112 *** Got signal Terminated *** 2026-03-09T20:15:08.128 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[65254]: 2026-03-09T20:15:07.637+0000 7f955e695700 -1 osd.3 112 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: Upgrade: Updating osd.3 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:08.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: Deploying daemon osd.3 on vm01 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='client.44289 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='client.? 
192.168.123.101:0/2241735353' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[97066]: from='client.34295 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: Upgrade: Updating osd.3 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: Deploying daemon osd.3 on vm01 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='client.44289 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/2241735353' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:08.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:07 vm01 ceph-mon[98877]: from='client.34295 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:08.911 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119180]: 2026-03-09 20:15:08.707135725 +0000 UTC m=+1.095835818 container died 97e61b18d2de2f16b5ae0bb92f8799f0fe8ac0e80818c14dafa0d2bd5a3d7ca4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, release=754, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, vendor=Red Hat, Inc., RELEASE=HEAD, io.openshift.expose-services=, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, name=centos-stream, GIT_BRANCH=HEAD, version=8, maintainer=Guillaume Abrioux , GIT_CLEAN=True, io.buildah.version=1.19.8) 2026-03-09T20:15:08.911 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119180]: 2026-03-09 20:15:08.744972144 +0000 UTC m=+1.133672237 container remove 97e61b18d2de2f16b5ae0bb92f8799f0fe8ac0e80818c14dafa0d2bd5a3d7ca4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, ceph=True, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, vcs-type=git, io.openshift.expose-services=, vendor=Red Hat, Inc., version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, GIT_CLEAN=True, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, release=754, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 
2026-03-09T20:15:08.911 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 bash[119180]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3 2026-03-09T20:15:08.911 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119245]: 2026-03-09 20:15:08.885955746 +0000 UTC m=+0.019712798 container create 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:15:08.911 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:08 vm01 ceph-mon[97066]: osd.3 marked itself down and dead 2026-03-09T20:15:08.911 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:08 vm01 ceph-mon[97066]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:08.911 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:08 vm01 ceph-mon[98877]: osd.3 marked itself down and dead 2026-03-09T20:15:08.911 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:08 vm01 ceph-mon[98877]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:09.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:08 vm06 ceph-mon[81259]: osd.3 marked itself down and dead 2026-03-09T20:15:09.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:08 vm06 ceph-mon[81259]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:09.195 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119245]: 2026-03-09 20:15:08.929483688 +0000 UTC m=+0.063240750 container init 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119245]: 2026-03-09 20:15:08.933129392 +0000 UTC m=+0.066886454 container start 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119245]: 2026-03-09 20:15:08.936507005 +0000 UTC m=+0.070264067 container attach 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS) 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:08 vm01 podman[119245]: 2026-03-09 20:15:08.877764525 +0000 UTC m=+0.011521587 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119264]: 2026-03-09 20:15:09.09029798 +0000 UTC m=+0.010604971 container died 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2) 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119264]: 2026-03-09 20:15:09.105393755 +0000 UTC m=+0.025700746 container remove 8d78d0cf27fdb024a8179dd456abb8704e7a68d4cedc4a1749e10c82e6ceb15e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, 
org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default) 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.3.service: Deactivated successfully. 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 systemd[1]: Stopped Ceph osd.3 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:15:09.196 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.3.service: Consumed 8.202s CPU time. 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 systemd[1]: Starting Ceph osd.3 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119351]: 2026-03-09 20:15:09.500207439 +0000 UTC m=+0.107497647 container create dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119351]: 2026-03-09 20:15:09.402775833 +0000 UTC m=+0.010066042 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119351]: 2026-03-09 20:15:09.546043941 +0000 UTC m=+0.153334171 container init dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 
vm01 podman[119351]: 2026-03-09 20:15:09.548957755 +0000 UTC m=+0.156247974 container start dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T20:15:09.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 podman[119351]: 2026-03-09 20:15:09.554437402 +0000 UTC m=+0.161727631 container attach dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:15:10.082 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:09 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:10.082 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:09 vm01 ceph-mon[97066]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T20:15:10.082 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:09 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:10.082 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:09 vm01 ceph-mon[98877]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 bash[119351]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:09 vm01 bash[119351]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:10.082 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:09 vm06 ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:10.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:09 vm06 ceph-mon[81259]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-6b4ed91e-e7d9-4118-a8fb-243f9177fe60/osd-block-76c92c68-25ba-439b-bf8a-3eedaeec05b5 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T20:15:10.378 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-6b4ed91e-e7d9-4118-a8fb-243f9177fe60/osd-block-76c92c68-25ba-439b-bf8a-3eedaeec05b5 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T20:15:10.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:10 vm01 ceph-mon[97066]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T20:15:10.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:10 vm01 ceph-mon[97066]: pgmap v76: 161 pgs: 26 stale+active+clean, 135 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:15:10.880 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/ln -snf /dev/ceph-6b4ed91e-e7d9-4118-a8fb-243f9177fe60/osd-block-76c92c68-25ba-439b-bf8a-3eedaeec05b5 /var/lib/ceph/osd/ceph-3/block 2026-03-09T20:15:10.880 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/ln -snf /dev/ceph-6b4ed91e-e7d9-4118-a8fb-243f9177fe60/osd-block-76c92c68-25ba-439b-bf8a-3eedaeec05b5 /var/lib/ceph/osd/ceph-3/block 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T20:15:10.881 
INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate[119361]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119351]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 conmon[119361]: conmon dfb37d4b503a8b802713 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e.scope/container/memory.events 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119351]: 2026-03-09 20:15:10.456835555 +0000 UTC m=+1.064125774 container died dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119351]: 2026-03-09 20:15:10.480148701 +0000 UTC m=+1.087438920 container remove dfb37d4b503a8b802713ce80c2a153f3b8c10c845e3f01b98ef7b64578be055e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-activate, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3, ceph=True, org.label-schema.build-date=20260223) 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119610]: 2026-03-09 20:15:10.586571917 +0000 UTC m=+0.019163100 container create 
b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0) 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119610]: 2026-03-09 20:15:10.626735842 +0000 UTC m=+0.059327025 container init b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119610]: 2026-03-09 20:15:10.633802719 +0000 UTC m=+0.066393902 container start b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223) 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 bash[119610]: b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 podman[119610]: 2026-03-09 20:15:10.579715544 +0000 UTC m=+0.012306736 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:10.881 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:10 vm01 systemd[1]: Started Ceph osd.3 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:15:10.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:10 vm01 ceph-mon[98877]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T20:15:10.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:10 vm01 ceph-mon[98877]: pgmap v76: 161 pgs: 26 stale+active+clean, 135 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:15:11.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:10 vm06 ceph-mon[81259]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T20:15:11.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:10 vm06 ceph-mon[81259]: pgmap v76: 161 pgs: 26 stale+active+clean, 135 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:15:11.628 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:11 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:15:11.473+0000 7fdf6a57f740 -1 Falling back to public interface 2026-03-09T20:15:11.928 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:11 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:11.928 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:11 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:11.928 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:11 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:11.928 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:11 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:12.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:11 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:12.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:11 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:12.818 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:12] "GET /metrics HTTP/1.1" 200 37749 "" "Prometheus/2.51.0" 2026-03-09T20:15:12.819 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:15:12.401+0000 7fdf6a57f740 -1 osd.3 0 read_superblock omap replica is missing. 
2026-03-09T20:15:12.819 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:12 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:15:12.490+0000 7fdf6a57f740 -1 osd.3 112 log_to_monitors true 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: pgmap v77: 161 pgs: 30 active+undersized, 5 stale+active+clean, 19 active+undersized+degraded, 107 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:15:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:15:13.040+0000 7fdf6232a640 -1 osd.3 112 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: pgmap v77: 161 pgs: 30 active+undersized, 5 stale+active+clean, 19 active+undersized+degraded, 107 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.071 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:12 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='client.15186 -' 
entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: pgmap v77: 161 pgs: 30 active+undersized, 5 stale+active+clean, 19 active+undersized+degraded, 107 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:12 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:13.834 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:13 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:15:13.832+0000 7f1345fe3640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 57/627 objects degraded (9.091%), 19 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 57/627 objects degraded (9.091%), 19 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:13 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: Health check failed: Degraded data redundancy: 57/627 objects degraded (9.091%), 19 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:14.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:13 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393] boot 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:15:15.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:15 vm06 ceph-mon[81259]: pgmap v80: 161 pgs: 37 active+undersized, 25 active+undersized+degraded, 99 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 81/627 objects degraded (12.919%) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393] boot 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[97066]: pgmap v80: 161 pgs: 37 active+undersized, 25 active+undersized+degraded, 99 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 81/627 objects degraded (12.919%) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:15.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: osd.3 [v2:192.168.123.101:6826/2223099393,v1:192.168.123.101:6827/2223099393] boot 2026-03-09T20:15:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T20:15:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T20:15:15.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:15 vm01 ceph-mon[98877]: pgmap v80: 161 pgs: 37 active+undersized, 25 active+undersized+degraded, 99 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 81/627 objects degraded (12.919%) 2026-03-09T20:15:16.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:16 vm01 ceph-mon[97066]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T20:15:16.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:16 vm01 ceph-mon[98877]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T20:15:16.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:16 vm06 ceph-mon[81259]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T20:15:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:17.004Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:17 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:17.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:17.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:17 vm01 ceph-mon[97066]: pgmap v82: 161 pgs: 25 active+undersized, 14 active+undersized+degraded, 122 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T20:15:17.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:17 vm01 ceph-mon[98877]: pgmap v82: 161 pgs: 25 active+undersized, 14 active+undersized+degraded, 122 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T20:15:17.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:17 vm06 ceph-mon[81259]: pgmap v82: 161 pgs: 25 active+undersized, 14 active+undersized+degraded, 122 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T20:15:18.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:18 vm06 ceph-mon[81259]: 
Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:18.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:18 vm01 ceph-mon[97066]: Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:18.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:18 vm01 ceph-mon[98877]: Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:19.530 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:19 vm01 ceph-mon[97066]: pgmap v83: 161 pgs: 4 active+undersized, 4 active+undersized+degraded, 153 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%) 2026-03-09T20:15:19.530 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:19 vm01 ceph-mon[98877]: pgmap v83: 161 pgs: 4 active+undersized, 4 active+undersized+degraded, 153 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%) 2026-03-09T20:15:19.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:19 vm06 ceph-mon[81259]: pgmap v83: 161 pgs: 4 active+undersized, 4 active+undersized+degraded, 153 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%) 2026-03-09T20:15:21.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:21 vm06 ceph-mon[81259]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:15:21.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:21.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:21 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:21.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:21 vm06 ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded) 2026-03-09T20:15:21.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:21 vm06 ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:15:21.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[97066]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:15:21.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded) 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[98877]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
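Aside (not part of the captured run): the pgmap summaries above are the quickest way to follow recovery between OSD restarts. Below is a minimal Python sketch that skims a log in this format and prints one line per pgmap epoch; the file name teuthology.log is an assumption, and it presumes the on-disk log keeps one "ISO-timestamp INFO:journalctl@ceph.mon.X..." entry per line (all three mons repeat the same pgmap, so duplicates are dropped by version).

#!/usr/bin/env python3
# Minimal sketch: print one line per pgmap report so the degraded-object
# percentage can be followed over time during the upgrade.
import re
import sys

PGMAP_RE = re.compile(
    r'^(?P<ts>\S+) INFO:journalctl@ceph\.mon\.\S+:'
    r'.*?pgmap v(?P<ver>\d+): (?P<pgs>\d+) pgs:')
DEGRADED_RE = re.compile(r'\d+/\d+ objects degraded \(([\d.]+)%\)')

def main(path="teuthology.log"):            # path is an assumption
    seen = set()
    with open(path, errors="replace") as fh:
        for line in fh:
            m = PGMAP_RE.match(line)
            if not m or m.group("ver") in seen:
                continue                    # each mon reports the same pgmap; keep one copy
            seen.add(m.group("ver"))
            d = DEGRADED_RE.search(line)
            pct = d.group(1) if d else "0"
            print(f"{m.group('ts')}  pgmap v{m.group('ver')}  "
                  f"{m.group('pgs')} pgs  {pct}% degraded")

if __name__ == "__main__":
    main(*sys.argv[1:])

Run over this stretch of the log it would show the degraded fraction falling from about 12.9% through 7.97% and 2.55% to all 161 PGs active+clean before the next OSD is taken down.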
2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 4 pgs degraded) 2026-03-09T20:15:21.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:21 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:15:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:22 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:22] "GET /metrics HTTP/1.1" 200 37754 "" "Prometheus/2.51.0" 2026-03-09T20:15:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:23 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:23.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:23 vm06 ceph-mon[81259]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 126 B/s rd, 0 op/s 2026-03-09T20:15:23.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:23 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:23.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:23 vm01 ceph-mon[97066]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 126 B/s rd, 0 op/s 2026-03-09T20:15:23.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:23 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:23.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:23 vm01 ceph-mon[98877]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 126 B/s rd, 0 op/s 2026-03-09T20:15:25.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:25 vm06 ceph-mon[81259]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T20:15:25.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:25 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:25 vm01 ceph-mon[97066]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T20:15:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:25 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:25 vm01 ceph-mon[98877]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T20:15:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:25 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:27 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:27.004Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 
err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:27 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:27.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:27 vm06 ceph-mon[81259]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 555 B/s rd, 0 op/s 2026-03-09T20:15:27.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:27 vm01 ceph-mon[98877]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 555 B/s rd, 0 op/s 2026-03-09T20:15:27.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:27 vm01 ceph-mon[97066]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 555 B/s rd, 0 op/s 2026-03-09T20:15:29.609 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:29 vm06 ceph-mon[81259]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:15:29.609 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:29.609 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:29.609 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:15:29.609 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:29 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[98877]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:29.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[97066]: pgmap v88: 
161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T20:15:29.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:29.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:29.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T20:15:29.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:29 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:30.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:29 vm06 systemd[1]: Stopping Ceph osd.4 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:15:30.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T20:15:30.011+0000 7f77a17f5700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:15:30.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T20:15:30.011+0000 7f77a17f5700 -1 osd.4 117 *** Got signal Terminated *** 2026-03-09T20:15:30.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[51498]: 2026-03-09T20:15:30.011+0000 7f77a17f5700 -1 osd.4 117 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:15:30.833 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:30 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:30.833 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:30 vm06 ceph-mon[81259]: Upgrade: osd.4 is safe to restart 2026-03-09T20:15:30.833 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:30 vm06 ceph-mon[81259]: Upgrade: Updating osd.4 2026-03-09T20:15:30.833 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:30 vm06 ceph-mon[81259]: Deploying daemon osd.4 on vm06 2026-03-09T20:15:30.833 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:30 vm06 ceph-mon[81259]: osd.4 marked itself down and dead 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[88958]: 2026-03-09 20:15:30.589827609 +0000 UTC m=+0.593966428 container died 8eb62b7332f075bcc9b8e01dfa5808937f4e9f6523e8355a7851713ab3b47385 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., version=8, ceph=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, GIT_BRANCH=HEAD, io.openshift.expose-services=, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8) 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[88958]: 2026-03-09 20:15:30.612294691 +0000 UTC m=+0.616433510 container remove 8eb62b7332f075bcc9b8e01dfa5808937f4e9f6523e8355a7851713ab3b47385 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, distribution-scope=public, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, io.openshift.tags=base centos centos-stream, vcs-type=git, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, ceph=True, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/) 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 bash[88958]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89023]: 2026-03-09 20:15:30.740617233 +0000 UTC m=+0.015587137 container create 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89023]: 2026-03-09 20:15:30.770031497 +0000 UTC m=+0.045001411 container init 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89023]: 2026-03-09 20:15:30.779031362 +0000 UTC m=+0.054001266 container start 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default) 2026-03-09T20:15:30.835 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 
vm06 podman[89023]: 2026-03-09 20:15:30.780085114 +0000 UTC m=+0.055055018 container attach 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[97066]: Upgrade: osd.4 is safe to restart 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[97066]: Upgrade: Updating osd.4 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[97066]: Deploying daemon osd.4 on vm06 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[97066]: osd.4 marked itself down and dead 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[98877]: Upgrade: osd.4 is safe to restart 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[98877]: Upgrade: Updating osd.4 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[98877]: Deploying daemon osd.4 on vm06 2026-03-09T20:15:30.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:30 vm01 ceph-mon[98877]: osd.4 marked itself down and dead 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89023]: 2026-03-09 20:15:30.734326568 +0000 UTC m=+0.009296472 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89042]: 2026-03-09 20:15:30.923635086 +0000 UTC m=+0.010765219 container died 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 podman[89042]: 2026-03-09 20:15:30.937685464 +0000 UTC m=+0.024815608 container remove 6a106efee15256f847c5e8609860327704e70d49ab42a7b06dd8eb298e4c6815 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.4.service: Deactivated successfully. 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 systemd[1]: Stopped Ceph osd.4 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:15:31.093 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:30 vm06 systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.4.service: Consumed 40.137s CPU time. 
2026-03-09T20:15:31.419 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 systemd[1]: Starting Ceph osd.4 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:15:31.419 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 podman[89127]: 2026-03-09 20:15:31.301185848 +0000 UTC m=+0.034195335 container create 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:15:31.419 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 podman[89127]: 2026-03-09 20:15:31.277730697 +0000 UTC m=+0.010740194 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 podman[89127]: 2026-03-09 20:15:31.418372166 +0000 UTC m=+0.151381653 container init 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default) 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 podman[89127]: 2026-03-09 20:15:31.427231448 +0000 UTC m=+0.160240925 container start 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 bash[89127]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 bash[89127]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:31.843 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:31 vm06 podman[89127]: 2026-03-09 20:15:31.519797904 +0000 UTC m=+0.252807391 container attach 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-09T20:15:32.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[97066]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:32.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:32.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[97066]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T20:15:32.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[98877]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:32.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:32.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:31 vm01 ceph-mon[98877]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T20:15:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:31 vm06 ceph-mon[81259]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 188 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:31 vm06 ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:32.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:31 vm06 ceph-mon[81259]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:32.342 
INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:32.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T20:15:32.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T20:15:32.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-70cda563-d345-4e4e-918c-a84dcd2a568c/osd-block-6b56be69-0ed2-425d-90bc-bb41ca45e362 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T20:15:32.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-70cda563-d345-4e4e-918c-a84dcd2a568c/osd-block-6b56be69-0ed2-425d-90bc-bb41ca45e362 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/ln -snf /dev/ceph-70cda563-d345-4e4e-918c-a84dcd2a568c/osd-block-6b56be69-0ed2-425d-90bc-bb41ca45e362 /var/lib/ceph/osd/ceph-4/block 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/ln -snf /dev/ceph-70cda563-d345-4e4e-918c-a84dcd2a568c/osd-block-6b56be69-0ed2-425d-90bc-bb41ca45e362 /var/lib/ceph/osd/ceph-4/block 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 
vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate[89141]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89127]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 conmon[89141]: conmon 1e74295561f6d103351b : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed.scope/container/memory.events 2026-03-09T20:15:32.663 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89127]: 2026-03-09 20:15:32.442807592 +0000 UTC m=+1.175817079 container died 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:15:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:32 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:32] "GET /metrics HTTP/1.1" 200 37754 "" "Prometheus/2.51.0" 2026-03-09T20:15:32.913 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:32 vm06 ceph-mon[81259]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T20:15:32.913 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:32 vm06 ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:32.913 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:32 vm06 ceph-mon[81259]: pgmap v92: 161 pgs: 50 peering, 8 stale+active+clean, 103 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 0 op/s 2026-03-09T20:15:32.919 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89127]: 2026-03-09 20:15:32.760641431 +0000 UTC m=+1.493650918 container remove 1e74295561f6d103351b8fc02e45adc03e412a1a4e63cf67800bf4eb276769ed (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-activate, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:15:32.920 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89387]: 2026-03-09 20:15:32.87661122 
+0000 UTC m=+0.020040270 container create ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:15:33.266 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89387]: 2026-03-09 20:15:32.921274457 +0000 UTC m=+0.064703516 container init ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:33.267 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89387]: 2026-03-09 20:15:32.927392669 +0000 UTC m=+0.070821728 container start ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3) 2026-03-09T20:15:33.267 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 bash[89387]: ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 2026-03-09T20:15:33.267 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 podman[89387]: 2026-03-09 20:15:32.869494611 +0000 UTC m=+0.012923679 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:33.267 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:32 vm06 systemd[1]: Started Ceph osd.4 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:15:33.267 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:33 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:15:33.264+0000 7fa20b65d740 -1 Falling back to public interface 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[97066]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[97066]: pgmap v92: 161 pgs: 50 peering, 8 stale+active+clean, 103 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 0 op/s 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[98877]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:33.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:32 vm01 ceph-mon[98877]: pgmap v92: 161 pgs: 50 peering, 8 stale+active+clean, 103 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 0 op/s 2026-03-09T20:15:34.020 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:33 vm06 ceph-mon[81259]: Health check failed: Reduced data availability: 6 pgs inactive, 11 pgs peering (PG_AVAILABILITY) 2026-03-09T20:15:34.021 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:34.021 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:33 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:34.280 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:15:34.131+0000 7fa20b65d740 -1 osd.4 0 read_superblock omap replica is missing. 
2026-03-09T20:15:34.280 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:34 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:15:34.186+0000 7fa20b65d740 -1 osd.4 117 log_to_monitors true 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[97066]: Health check failed: Reduced data availability: 6 pgs inactive, 11 pgs peering (PG_AVAILABILITY) 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[98877]: Health check failed: Reduced data availability: 6 pgs inactive, 11 pgs peering (PG_AVAILABILITY) 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:34.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:33 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:34 vm06 ceph-mon[81259]: pgmap v93: 161 pgs: 11 active+undersized, 50 peering, 8 active+undersized+degraded, 92 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T20:15:35.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:34 vm06 ceph-mon[81259]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:34 vm06 ceph-mon[81259]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:34 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.252 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:15:35 vm06 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:15:35.002+0000 7fa203408640 -1 osd.4 117 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:15:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[97066]: pgmap v93: 161 pgs: 11 active+undersized, 50 peering, 8 active+undersized+degraded, 92 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T20:15:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[97066]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[97066]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:34 vm01 
ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[98877]: pgmap v93: 161 pgs: 11 active+undersized, 50 peering, 8 active+undersized+degraded, 92 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T20:15:35.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[98877]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[98877]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T20:15:35.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:35.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:34 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:36 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:36 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:15:36.180+0000 7f1345fe3640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T20:15:36.378 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:36.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:36 vm01 ceph-mon[98877]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586] boot 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: pgmap v96: 161 pgs: 19 active+undersized, 38 peering, 12 active+undersized+degraded, 92 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:15:37.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T20:15:37.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:37 vm06 ceph-mon[81259]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T20:15:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:37.005Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:37.006Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586] boot 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: pgmap v96: 161 pgs: 19 active+undersized, 38 peering, 12 active+undersized+degraded, 92 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:37.379 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[97066]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: osd.4 [v2:192.168.123.106:6800/263448586,v1:192.168.123.106:6801/263448586] boot 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: pgmap v96: 161 pgs: 19 active+undersized, 38 peering, 12 active+undersized+degraded, 92 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline)
2026-03-09T20:15:37.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:37 vm01 ceph-mon[98877]: osdmap e122: 8 total, 8 up, 8 in
2026-03-09T20:15:37.616 INFO:teuthology.orchestra.run.vm01.stdout:true
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (15m) 26s ago 22m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (3m) 3s ago 22m 76.7M - 10.4.0 c8b91775d855 d2acbfd0f792
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (3m) 26s ago 21m 52.5M - 3.5 e1d6a67b021e fb194521cf2f
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (3m) 3s ago 23m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (12m) 26s ago 24m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (2m) 26s ago 24m 53.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (2m) 3s ago 24m 46.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (2m) 26s ago 24m 42.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (15m) 26s ago 22m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (15m) 3s ago 22m 10.0M - 1.7.0 72c9c2088986 f65efb036188
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (101s) 26s ago 23m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (79s) 26s ago 23m 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (64s) 26s ago 23m 44.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (27s) 26s ago 23m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (5s) 3s ago 23m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (23m) 3s ago 23m 58.7M 4096M 17.2.0 e1d6a67b021e cb05c08dd731
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (22m) 3s ago 22m 54.9M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (22m) 3s ago 22m 59.3M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (3m) 3s ago 22m 52.7M - 2.51.0 1d3b7f56885b 865f54718b3f
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (22m) 26s ago 22m 96.8M - 17.2.0 e1d6a67b021e 86ff044460d1
2026-03-09T20:15:38.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (22m) 3s ago 22m 96.0M - 17.2.0 e1d6a67b021e ec6c97005ac6
2026-03-09T20:15:38.290 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:38 vm01 ceph-mon[97066]: Health check update: Reduced data availability: 8 pgs inactive, 12 pgs peering (PG_AVAILABILITY)
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "mon": {
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": {
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "osd": {
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3,
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": {
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "overall": {
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5,
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 10
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-03-09T20:15:38.290 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:15:38.290 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:38 vm01 ceph-mon[98877]: Health check update: Reduced data availability: 8 pgs inactive, 12 pgs peering (PG_AVAILABILITY)
2026-03-09T20:15:38.343 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:38 vm06.local ceph-mon[81259]: Health check update: Reduced data availability: 8 pgs inactive, 12 pgs peering (PG_AVAILABILITY)
2026-03-09T20:15:38.506 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true,
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) crash,osd",
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [],
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "5/8 daemons upgraded",
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons",
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false
2026-03-09T20:15:38.507 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:15:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:39 vm06.local ceph-mon[81259]: from='client.54292 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:39 vm06.local ceph-mon[81259]: from='client.54298 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:39 vm06.local ceph-mon[81259]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:39 vm06.local ceph-mon[81259]: pgmap v98: 161 pgs: 16 active+undersized, 38 peering, 11 active+undersized+degraded, 96 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-09T20:15:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:39 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/3329253830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[97066]: from='client.54292 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[97066]: from='client.54298 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[97066]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[97066]: pgmap v98: 161 pgs: 16 active+undersized, 38 peering, 11 active+undersized+degraded, 96 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[97066]: from='client.?
192.168.123.101:0/3329253830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[98877]: from='client.54292 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[98877]: from='client.54298 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[98877]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[98877]: pgmap v98: 161 pgs: 16 active+undersized, 38 peering, 11 active+undersized+degraded, 96 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-09T20:15:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:39 vm01 ceph-mon[98877]: from='client.? 192.168.123.101:0/3329253830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:40 vm06.local ceph-mon[81259]: from='client.54310 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:40.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:40 vm06.local ceph-mon[81259]: Health check update: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:40.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:40 vm01 ceph-mon[97066]: from='client.54310 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:40.649 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:40 vm01 ceph-mon[97066]: Health check update: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:40.649 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:40 vm01 ceph-mon[98877]: from='client.54310 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:15:40.649 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:40 vm01 ceph-mon[98877]: Health check update: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:41.562 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:41 vm06.local ceph-mon[81259]: pgmap v99: 161 pgs: 7 active+undersized, 26 peering, 2 active+undersized+degraded, 126 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 8/627 objects degraded (1.276%) 2026-03-09T20:15:41.563 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:41 vm06.local ceph-mon[81259]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 8 pgs inactive, 12 pgs peering) 2026-03-09T20:15:41.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:41 vm01 ceph-mon[97066]: pgmap v99: 161 pgs: 7 active+undersized, 26 peering, 2 active+undersized+degraded, 126 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 8/627 objects degraded (1.276%) 2026-03-09T20:15:41.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:41 vm01 ceph-mon[97066]: Health check cleared: PG_AVAILABILITY (was: Reduced data 
availability: 8 pgs inactive, 12 pgs peering) 2026-03-09T20:15:41.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:41 vm01 ceph-mon[98877]: pgmap v99: 161 pgs: 7 active+undersized, 26 peering, 2 active+undersized+degraded, 126 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 8/627 objects degraded (1.276%) 2026-03-09T20:15:41.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:41 vm01 ceph-mon[98877]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 8 pgs inactive, 12 pgs peering) 2026-03-09T20:15:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:42 vm06.local ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 8/627 objects degraded (1.276%), 2 pgs degraded) 2026-03-09T20:15:42.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:42 vm06.local ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:15:42.606 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:42 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 8/627 objects degraded (1.276%), 2 pgs degraded) 2026-03-09T20:15:42.606 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:42 vm01 ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:15:42.607 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:42 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 8/627 objects degraded (1.276%), 2 pgs degraded) 2026-03-09T20:15:42.607 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:42 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:15:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:42 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:42] "GET /metrics HTTP/1.1" 200 37701 "" "Prometheus/2.51.0" 2026-03-09T20:15:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:43 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:43 vm06.local ceph-mon[81259]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 716 B/s rd, 0 op/s 2026-03-09T20:15:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:43 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:43 vm01 ceph-mon[97066]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 716 B/s rd, 0 op/s 2026-03-09T20:15:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:43 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:43 vm01 ceph-mon[98877]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 716 B/s rd, 0 op/s 2026-03-09T20:15:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:45 vm06.local ceph-mon[81259]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:45 vm01 ceph-mon[97066]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 
160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:45 vm01 ceph-mon[98877]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:15:47.297 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:47.006Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:47.297 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:47 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:47.006Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:47 vm06.local ceph-mon[81259]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:47.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:47 vm01 ceph-mon[97066]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:47.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:47 vm01 ceph-mon[98877]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:49.585 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:49 vm01 ceph-mon[97066]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:15:49.585 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:49 vm01 ceph-mon[98877]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:15:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:49 vm06.local ceph-mon[81259]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:15:51.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:51 vm06.local ceph-mon[81259]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:51.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:51 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:51.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:51 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:15:51.394 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:51 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[97066]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
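The alertmanager entries above (and their earlier occurrences at 20:15:37) are noise unrelated to the upgrade: cephadm points Alertmanager's ceph-dashboard webhook at https://host.containers.internal:8443/api/prometheus_receiver, and the host.containers.internal alias does not resolve on these VPS nodes, so every notification attempt fails and is retried. A minimal sketch for confirming this is a name-resolution problem rather than a dashboard problem, assuming shell access on vm01 (standard tooling, not commands from this run):

    # Does podman's host alias resolve on this node at all?
    getent hosts host.containers.internal
    # The dashboard itself answers when addressed by a resolvable name
    # (-k: the test cluster's dashboard uses a self-signed certificate).
    curl -ks -o /dev/null -w '%{http_code}\n' https://vm01:8443/

If the first command returns nothing while the second returns an HTTP status, the webhook errors can be ignored for the purposes of this run.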
2026-03-09T20:15:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:15:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:51.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[98877]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:15:51.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:51.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:15:51.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:51 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:52.193 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.193 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.193 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: Upgrade: osd.5 is safe to restart
2026-03-09T20:15:52.193 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:52.194 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-09T20:15:52.194 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: Upgrade: osd.5 is safe to restart
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-09T20:15:52.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: Upgrade: osd.5 is safe to restart
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-09T20:15:52.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:52 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:15:52.592 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:52 vm06.local systemd[1]: Stopping Ceph osd.5 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:15:52.593 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T20:15:52.415+0000 7fe1c7d37700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:15:52.593 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T20:15:52.415+0000 7fe1c7d37700 -1 osd.5 122 *** Got signal Terminated *** 2026-03-09T20:15:52.593 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:52 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[54214]: 2026-03-09T20:15:52.415+0000 7fe1c7d37700 -1 osd.5 122 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:15:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:52 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:15:52] "GET /metrics HTTP/1.1" 200 37851 "" "Prometheus/2.51.0" 2026-03-09T20:15:53.483 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:53 vm06.local ceph-mon[81259]: Upgrade: Updating osd.5 2026-03-09T20:15:53.483 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:53 vm06.local ceph-mon[81259]: Deploying daemon osd.5 on vm06 2026-03-09T20:15:53.483 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:53 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:53.483 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:53 vm06.local ceph-mon[81259]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:53.483 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:53 vm06.local ceph-mon[81259]: osd.5 marked itself down and dead 2026-03-09T20:15:53.483 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93122]: 2026-03-09 20:15:53.269869036 +0000 UTC m=+0.867318941 container died cb05c08dd731732e9e02f281eefb2dd63c1b88a19aebadf3b8699e845a8a2286 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, RELEASE=HEAD, version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, com.redhat.component=centos-stream-container, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, io.buildah.version=1.19.8, io.openshift.expose-services=, vendor=Red Hat, Inc., io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, ceph=True, distribution-scope=public) 2026-03-09T20:15:53.483 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93122]: 2026-03-09 20:15:53.293258184 +0000 UTC m=+0.890708089 container remove cb05c08dd731732e9e02f281eefb2dd63c1b88a19aebadf3b8699e845a8a2286 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_CLEAN=True, name=centos-stream, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8, com.redhat.component=centos-stream-container, ceph=True, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream) 2026-03-09T20:15:53.483 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local bash[93122]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5 2026-03-09T20:15:53.483 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93191]: 2026-03-09 20:15:53.458833761 +0000 UTC m=+0.021303604 container create 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[97066]: Upgrade: Updating osd.5 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[97066]: Deploying daemon osd.5 on vm06 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[97066]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[97066]: osd.5 marked itself down and dead 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[98877]: Upgrade: Updating osd.5 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[98877]: Deploying daemon osd.5 on vm06 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[98877]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:15:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:53 vm01 ceph-mon[98877]: osd.5 marked itself down and dead 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93191]: 2026-03-09 20:15:53.501966994 +0000 UTC m=+0.064436837 container init 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, ceph=True, 
org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93191]: 2026-03-09 20:15:53.505821088 +0000 UTC m=+0.068290931 container start 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid) 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93191]: 2026-03-09 20:15:53.513162128 +0000 UTC m=+0.075631981 container attach 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93191]: 2026-03-09 20:15:53.451580465 +0000 UTC m=+0.014050319 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93210]: 2026-03-09 20:15:53.672522642 +0000 UTC m=+0.011743450 container died 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, CEPH_REF=squid, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local podman[93210]: 2026-03-09 20:15:53.687647442 +0000 UTC m=+0.026868250 container remove 02f0148787a05aeef07b5aba65c9cd0f58b0005416f8534307b230706d3df698 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5.service: Deactivated successfully. 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local systemd[1]: Stopped Ceph osd.5 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:15:53.779 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5.service: Consumed 7.788s CPU time. 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:53 vm06.local systemd[1]: Starting Ceph osd.5 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
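osd.5's restart follows cephadm's usual redeploy pattern: the old 17.2.0 container is stopped and removed, short-lived -deactivate and -activate helper containers run from the new squid image (654f31e6858e; the -activate run follows below), and the per-daemon systemd unit is started again. The OSD keeps its ID and on-disk data; only the container image changes. Should a unit hang at this stage, the host-side state can be inspected directly; a sketch assuming root on vm06 (jq is an assumed convenience, not part of the test environment):

    # The per-daemon unit, named after the cluster fsid seen in the log.
    systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5.service
    # cephadm's JSON inventory of daemons deployed on this host.
    cephadm ls | jq '.[] | select(.name == "osd.5")'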
2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:53.999945791 +0000 UTC m=+0.023944538 container create 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, CEPH_REF=squid, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:54.042163861 +0000 UTC m=+0.066162608 container init 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:54.045264675 +0000 UTC m=+0.069263422 container start 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:54.053686327 +0000 UTC m=+0.077685074 container attach 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:53.992691713 +0000 UTC m=+0.016690470 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.143 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:54.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-mon[81259]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T20:15:54.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:54 vm01 ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:54.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:54 vm01 ceph-mon[97066]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T20:15:54.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:54 vm01 ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:15:54.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:54 vm01 ceph-mon[98877]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ceph-authtool --gen-print-key 
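The "Failed to activate via raw: did not find any matching OSD to activate" line above is expected rather than an error: the activate helper tries raw-mode activation first and, because these OSDs sit on LVM, falls back to ceph-volume's LVM path, which succeeds a few entries below. A sketch of the equivalent manual call, with the OSD id and fsid read from the osd-block path in the log (not necessarily the exact invocation cephadm uses):

    # Prime the OSD directory and block-device symlink without systemd,
    # as done inside the one-shot activate container.
    ceph-volume lvm activate 5 2ac39182-ee32-432b-bcf1-842662adc4ce --no-systemd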
2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d61c8ef5-adaa-4013-946d-92b8261cabe1/osd-block-2ac39182-ee32-432b-bcf1-842662adc4ce --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d61c8ef5-adaa-4013-946d-92b8261cabe1/osd-block-2ac39182-ee32-432b-bcf1-842662adc4ce --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T20:15:54.954 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/ln -snf /dev/ceph-d61c8ef5-adaa-4013-946d-92b8261cabe1/osd-block-2ac39182-ee32-432b-bcf1-842662adc4ce /var/lib/ceph/osd/ceph-5/block 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/ln -snf /dev/ceph-d61c8ef5-adaa-4013-946d-92b8261cabe1/osd-block-2ac39182-ee32-432b-bcf1-842662adc4ce /var/lib/ceph/osd/ceph-5/block 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate[93306]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local bash[93295]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:54 vm06.local podman[93295]: 2026-03-09 20:15:54.985135069 +0000 UTC m=+1.009133816 container died 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-09T20:15:55.210 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local podman[93295]: 2026-03-09 20:15:55.002517553 +0000 UTC m=+1.026516300 container remove 763170fe7a297114c99b4616fcec82cf3afb0947b1f6896d05dd07500aa7afdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-activate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local podman[93538]: 2026-03-09 20:15:55.145195515 +0000 UTC m=+0.041621776 container create f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local podman[93538]: 2026-03-09 20:15:55.185381861 +0000 UTC m=+0.081808122 container init f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, 
io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local podman[93538]: 2026-03-09 20:15:55.188144572 +0000 UTC m=+0.084570833 container start f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local bash[93538]: f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local podman[93538]: 2026-03-09 20:15:55.115227473 +0000 UTC m=+0.011653745 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:15:55.211 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local systemd[1]: Started Ceph osd.5 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
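Taken together, the osd.5 entries above show the usual cephadm re-activation path: the short-lived osd-5-activate container fails raw activation ("did not find any matching OSD to activate"), succeeds with ceph-volume lvm activate (prime-osd-dir, chown, block symlink), exits, and systemd then starts the long-running osd.5 container on the new image. A rough way to confirm the same outcome straight from the host journal (the time window is illustrative, not taken from this run):

    import subprocess

    # Scan the recent host journal for the ceph-volume activation outcome lines
    # that also appear in the teuthology capture above.
    out = subprocess.run(
        ["journalctl", "--since", "10 minutes ago", "--no-pager"],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in out.splitlines():
        if ("Failed to activate via raw" in line
                or "ceph-volume lvm activate successful" in line):
            print(line)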
2026-03-09T20:15:55.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:55 vm06.local ceph-mon[81259]: pgmap v107: 161 pgs: 6 peering, 19 stale+active+clean, 136 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:55.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:55 vm06.local ceph-mon[81259]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T20:15:55.535 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:55 vm06.local ceph-mon[81259]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T20:15:55.535 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local ceph-osd[93553]: -- 192.168.123.106:0/3752413812 <== mon.2 v2:192.168.123.106:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55b91b69c960 con 0x55b91c486400 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[97066]: pgmap v107: 161 pgs: 6 peering, 19 stale+active+clean, 136 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[97066]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[97066]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[98877]: pgmap v107: 161 pgs: 6 peering, 19 stale+active+clean, 136 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[98877]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T20:15:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:55 vm01 ceph-mon[98877]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T20:15:55.842 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:55 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:15:55.757+0000 7f55bee88740 -1 Falling back to public interface 2026-03-09T20:15:56.128 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:15:56.107+0000 7f55bee88740 -1 osd.5 0 read_superblock omap replica is missing. 
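The pgmap lines above show the brief peering/degraded window that accompanies each OSD restart: a handful of PGs go peering or stale+active+clean before returning to active+clean. If a summary is easier to follow than the raw pgmap strings, the same counts can be pulled from the status report; a sketch, assuming the JSON layout used by current releases:

    import json
    import subprocess

    # Summarise PG states from `ceph status`; pgs_by_state mirrors the counts
    # embedded in the monitors' pgmap log lines.
    status = json.loads(subprocess.run(
        ["ceph", "status", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout)
    pgmap = status["pgmap"]
    print("total PGs:", pgmap["num_pgs"])
    for entry in pgmap["pgs_by_state"]:
        print(entry["state_name"], entry["count"])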
2026-03-09T20:15:56.453 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.453 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.453 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-mon[81259]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:56.453 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-mon[81259]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:56.453 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:15:56.126+0000 7f55bee88740 -1 osd.5 122 log_to_monitors true 2026-03-09T20:15:56.453 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:15:56 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:15:56.283+0000 7f55b6c33640 -1 osd.5 122 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[97066]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[97066]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[98877]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:56.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:56 vm01 ceph-mon[98877]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T20:15:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:57.006Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 
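The Alertmanager notify failures around this point appear unrelated to the upgrade itself: the ceph-dashboard webhook receiver posts to host.containers.internal:8443, a name these VPS hosts cannot resolve, so every notification attempt is retried and eventually cancelled. A quick, purely illustrative check of whether that name resolves on a given host:

    import socket

    # host.containers.internal is the hostname in the dashboard webhook URL; on
    # these test VMs the lookup fails, which is exactly what Alertmanager reports.
    try:
        print(socket.gethostbyname("host.containers.internal"))
    except socket.gaierror as exc:
        print("lookup failed:", exc)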
2026-03-09T20:15:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:15:57.007Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: pgmap v109: 161 pgs: 25 active+undersized, 6 peering, 5 stale+active+clean, 12 active+undersized+degraded, 113 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 43/627 objects degraded (6.858%) 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: Health check failed: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: pgmap v109: 161 pgs: 25 active+undersized, 6 peering, 5 stale+active+clean, 12 active+undersized+degraded, 113 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 43/627 objects degraded (6.858%) 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: Health check failed: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T20:15:57.379 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:57 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.517 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: pgmap v109: 161 pgs: 25 active+undersized, 6 peering, 5 stale+active+clean, 12 active+undersized+degraded, 113 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 43/627 objects degraded (6.858%) 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: Health check failed: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:57.518 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:57 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:58.252 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426] boot 2026-03-09T20:15:58.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T20:15:58.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:15:58.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.252 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.253 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:58 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:15:58 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:15:58.062+0000 7f1345fe3640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426] boot 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 
5}]: dispatch 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:58.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: osd.5 [v2:192.168.123.106:6808/1963105426,v1:192.168.123.106:6809/1963105426] boot 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 
ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:15:58.379 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:58 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:59 vm06.local ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:59 vm06.local ceph-mon[81259]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:59 vm06.local ceph-mon[81259]: pgmap v112: 161 pgs: 29 active+undersized, 15 peering, 14 active+undersized+degraded, 103 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T20:15:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:15:59 vm06.local ceph-mon[81259]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T20:15:59.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[97066]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[97066]: pgmap v112: 161 pgs: 29 active+undersized, 15 peering, 14 active+undersized+degraded, 103 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[97066]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[98877]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[98877]: Upgrade: unsafe to stop osd(s) at this time (16 PGs are or would become offline) 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[98877]: pgmap v112: 161 pgs: 29 active+undersized, 15 peering, 14 active+undersized+degraded, 103 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T20:15:59.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:15:59 vm01 ceph-mon[98877]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T20:16:01.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:01 vm06.local ceph-mon[81259]: pgmap v114: 161 pgs: 25 active+undersized, 10 peering, 13 active+undersized+degraded, 113 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:16:01.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:01 vm01 ceph-mon[97066]: pgmap v114: 161 pgs: 25 active+undersized, 10 peering, 13 active+undersized+degraded, 113 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:16:01.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:01 vm01 ceph-mon[98877]: pgmap v114: 161 pgs: 25 active+undersized, 10 peering, 13 active+undersized+degraded, 113 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 44/627 objects degraded (7.018%) 2026-03-09T20:16:02.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:02 vm06.local ceph-mon[81259]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T20:16:02.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:02 vm06.local ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 13 pgs degraded) 2026-03-09T20:16:02.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:02 vm06.local ceph-mon[81259]: Cluster is now healthy 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:02 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:02] "GET /metrics HTTP/1.1" 200 37851 "" "Prometheus/2.51.0" 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[97066]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 13 pgs degraded) 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[98877]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 13 pgs degraded) 2026-03-09T20:16:02.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:02 vm01 ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:16:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:03 vm01 ceph-mon[97066]: 
from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:03 vm01 ceph-mon[97066]: pgmap v115: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:03.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:03 vm01 ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:03.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:03 vm01 ceph-mon[98877]: pgmap v115: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:03 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:03 vm06.local ceph-mon[81259]: pgmap v115: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:05 vm01 ceph-mon[97066]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:05 vm01 ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:05 vm01 ceph-mon[98877]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:05 vm01 ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:05 vm06.local ceph-mon[81259]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:05 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:07.376 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:07.007Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:07.376 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:07 vm01 ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:07.008Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:07.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:07 vm01 
ceph-mon[97066]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:07.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:07 vm01 ceph-mon[98877]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:07 vm06.local ceph-mon[81259]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:16:08.740 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (16m) 57s ago 22m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (3m) 12s ago 22m 74.6M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (3m) 57s ago 22m 52.5M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (4m) 12s ago 24m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (12m) 57s ago 25m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:16:09.177 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (2m) 57s ago 25m 53.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (3m) 12s ago 24m 50.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (2m) 57s ago 24m 42.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (16m) 57s ago 23m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (16m) 12s ago 22m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (2m) 57s ago 24m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (110s) 57s ago 24m 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (95s) 57s ago 23m 44.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (58s) 57s ago 23m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (36s) 12s ago 23m 46.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (14s) 12s ago 23m 12.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (23m) 12s ago 23m 55.3M 4096M 17.2.0 e1d6a67b021e 5a93d87874cb 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (23m) 
12s ago 23m 60.8M 4096M 17.2.0 e1d6a67b021e ff2fce5fbde2 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (4m) 12s ago 22m 52.8M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (22m) 57s ago 22m 96.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:16:09.178 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (22m) 12s ago 22m 96.1M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4, 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 11 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:16:09.440 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "6/8 daemons upgraded", 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:16:09.656 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:16:09.657 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:09 vm01.local ceph-mon[97066]: pgmap v118: 161 pgs: 161 
active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 0 op/s 2026-03-09T20:16:09.657 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:09 vm01.local ceph-mon[98877]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 0 op/s 2026-03-09T20:16:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:09 vm06.local ceph-mon[81259]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 0 op/s 2026-03-09T20:16:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:10 vm06.local ceph-mon[81259]: from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:10 vm06.local ceph-mon[81259]: from='client.44337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:10 vm06.local ceph-mon[81259]: from='client.34343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:10 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/2788650785' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:16:10.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:10 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[97066]: from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[97066]: from='client.44337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[97066]: from='client.34343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/2788650785' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[98877]: from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[98877]: from='client.44337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[98877]: from='client.34343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[98877]: from='client.? 
192.168.123.101:0/2788650785' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:16:10.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:10 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:11 vm06.local ceph-mon[81259]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:11 vm06.local ceph-mon[81259]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 0 op/s 2026-03-09T20:16:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:11 vm01.local ceph-mon[97066]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:11 vm01.local ceph-mon[97066]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 0 op/s 2026-03-09T20:16:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:11 vm01.local ceph-mon[98877]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:16:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:11 vm01.local ceph-mon[98877]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 0 op/s 2026-03-09T20:16:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:12 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:12] "GET /metrics HTTP/1.1" 200 37879 "" "Prometheus/2.51.0" 2026-03-09T20:16:13.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:13 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:13.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:13 vm06.local ceph-mon[81259]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-09T20:16:13.591 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:13 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:13 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:13 vm01.local ceph-mon[97066]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:13 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:13 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:13 vm01.local 
ceph-mon[98877]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s
2026-03-09T20:16:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:13 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-09T20:16:14.410 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local systemd[1]: Stopping Ceph osd.6 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:16:14.410 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T20:16:14.367+0000 7ff18151a700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-09T20:16:14.410 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T20:16:14.367+0000 7ff18151a700 -1 osd.6 127 *** Got signal Terminated ***
2026-03-09T20:16:14.410 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[57013]: 2026-03-09T20:16:14.367+0000 7ff18151a700 -1 osd.6 127 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: Upgrade: osd.6 is safe to restart
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: Upgrade: Updating osd.6
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: Deploying daemon osd.6 on vm06
2026-03-09T20:16:14.662 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:14 vm06.local ceph-mon[81259]: osd.6 marked itself down and dead
2026-03-09T20:16:14.663 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96881]: 2026-03-09 20:16:14.462515485 +0000 UTC m=+0.110032679 container died 5a93d87874cbdb5cd5eece2fe993ba352beb58337daa12af4885b7c4f72a3246 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, release=754, CEPH_POINT_RELEASE=-17.2.0, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, name=centos-stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, version=8, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, ceph=True, distribution-scope=public)
2026-03-09T20:16:14.663 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96881]: 2026-03-09 20:16:14.498853189 +0000 UTC m=+0.146370383 container remove 5a93d87874cbdb5cd5eece2fe993ba352beb58337daa12af4885b7c4f72a3246 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, io.k8s.display-name=CentOS Stream 8, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_CLEAN=True, com.redhat.component=centos-stream-container, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, ceph=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, CEPH_POINT_RELEASE=-17.2.0, release=754, GIT_BRANCH=HEAD, maintainer=Guillaume Abrioux , version=8, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, build-date=2022-05-03T08:36:31.336870, name=centos-stream, vcs-type=git)
2026-03-09T20:16:14.663 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local bash[96881]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: Upgrade: osd.6 is safe to restart
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: Upgrade: Updating osd.6
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: Deploying daemon osd.6 on vm06
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[97066]: osd.6 marked itself down and dead
2026-03-09T20:16:14.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: Upgrade: osd.6 is safe to restart
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: Upgrade: Updating osd.6
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: Deploying daemon osd.6 on vm06
2026-03-09T20:16:14.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:14 vm01.local ceph-mon[98877]: osd.6 marked itself down and dead
2026-03-09T20:16:14.964 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96947]: 2026-03-09 20:16:14.662710741 +0000 UTC m=+0.022248772 container create 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
2026-03-09T20:16:14.964 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96947]: 2026-03-09 20:16:14.704825648 +0000 UTC m=+0.064363690 container init 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-09T20:16:14.964 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96947]: 2026-03-09 20:16:14.708001993 +0000 UTC m=+0.067540035 container start 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, ceph=True)
2026-03-09T20:16:14.964 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96947]: 2026-03-09 20:16:14.717139545 +0000 UTC m=+0.076677587 container attach 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96947]: 2026-03-09 20:16:14.653699375 +0000 UTC m=+0.013237417 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96967]: 2026-03-09 20:16:14.861641299 +0000 UTC m=+0.011965257 container died 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2)
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local podman[96967]: 2026-03-09 20:16:14.878036947 +0000 UTC m=+0.028360904 container remove 64b71a6215874da2f9c7fb7c582fb460cb076460807dd6d98229713f70029d7f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.6.service: Deactivated successfully.
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local systemd[1]: Stopped Ceph osd.6 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:16:14.965 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:14 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.6.service: Consumed 7.275s CPU time.
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local systemd[1]: Starting Ceph osd.6 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local podman[97052]: 2026-03-09 20:16:15.186413796 +0000 UTC m=+0.018426430 container create 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2)
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local podman[97052]: 2026-03-09 20:16:15.228785512 +0000 UTC m=+0.060798146 container init 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local podman[97052]: 2026-03-09 20:16:15.232546362 +0000 UTC m=+0.064558996 container start 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image)
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local podman[97052]: 2026-03-09 20:16:15.239138341 +0000 UTC m=+0.071150975 container attach 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git)
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local podman[97052]: 2026-03-09 20:16:15.17909685 +0000 UTC m=+0.011109495 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:15.330 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-mon[81259]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:16:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-09T20:16:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-mon[81259]: osdmap e128: 8 total, 7 up, 8 in
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[97066]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[97066]: osdmap e128: 8 total, 7 up, 8 in
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[98877]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-09T20:16:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:15 vm01.local ceph-mon[98877]: osdmap e128: 8 total, 7 up, 8 in
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-09T20:16:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5228a0a9-cca3-41c4-b902-5ce8a86637e6/osd-block-021b1d92-8c4d-47f3-aa12-6b46528c6c64 --path /var/lib/ceph/osd/ceph-6 --no-mon-config
2026-03-09T20:16:16.093 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:15 vm06.local bash[97052]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5228a0a9-cca3-41c4-b902-5ce8a86637e6/osd-block-021b1d92-8c4d-47f3-aa12-6b46528c6c64 --path /var/lib/ceph/osd/ceph-6 --no-mon-config
2026-03-09T20:16:16.445 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-mon[81259]: osdmap e129: 8 total, 7 up, 8 in
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/ln -snf /dev/ceph-5228a0a9-cca3-41c4-b902-5ce8a86637e6/osd-block-021b1d92-8c4d-47f3-aa12-6b46528c6c64 /var/lib/ceph/osd/ceph-6/block
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97052]: Running command: /usr/bin/ln -snf /dev/ceph-5228a0a9-cca3-41c4-b902-5ce8a86637e6/osd-block-021b1d92-8c4d-47f3-aa12-6b46528c6c64 /var/lib/ceph/osd/ceph-6/block
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97052]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97052]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
2026-03-09T20:16:16.445 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97052]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate[97063]: --> ceph-volume lvm activate successful for osd ID: 6
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97052]: --> ceph-volume lvm activate successful for osd ID: 6
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local conmon[97063]: conmon 1fe795841483a10cca84 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f.scope/container/memory.events
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97052]: 2026-03-09 20:16:16.220419765 +0000 UTC m=+1.052432399 container died 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3)
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97052]: 2026-03-09 20:16:16.247619043 +0000 UTC m=+1.079631677 container remove 1fe795841483a10cca84432b8a26731721be96c1e58eb89281b7b31b42c4de8f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-activate, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97310]: 2026-03-09 20:16:16.380060634 +0000 UTC m=+0.038732176 container create 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97310]: 2026-03-09 20:16:16.415145333 +0000 UTC m=+0.073816875 container init 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97310]: 2026-03-09 20:16:16.418887306 +0000 UTC m=+0.077558848 container start 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local bash[97310]: 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019
2026-03-09T20:16:16.446 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local podman[97310]: 2026-03-09 20:16:16.364476556 +0000 UTC m=+0.023148108 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T20:16:16.775 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local systemd[1]: Started Ceph osd.6 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3.
2026-03-09T20:16:16.776 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:16:16.773+0000 7f9418c24740 -1 Falling back to public interface
2026-03-09T20:16:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:16 vm01.local ceph-mon[97066]: osdmap e129: 8 total, 7 up, 8 in
2026-03-09T20:16:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:16 vm01.local ceph-mon[98877]: osdmap e129: 8 total, 7 up, 8 in
2026-03-09T20:16:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:17.008Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:16:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:17.008Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:16:17.649 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:17 vm06.local ceph-mon[81259]: pgmap v124: 161 pgs: 10 active+undersized, 16 peering, 5 stale+active+clean, 5 active+undersized+degraded, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 25/627 objects degraded (3.987%)
2026-03-09T20:16:17.649 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:17 vm06.local ceph-mon[81259]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY)
2026-03-09T20:16:17.649 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:17 vm06.local ceph-mon[81259]: Health check failed: Degraded data redundancy: 25/627 objects degraded (3.987%), 5 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:17.649 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:17 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:17.649 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:17 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[97066]: pgmap v124: 161 pgs: 10 active+undersized, 16 peering, 5 stale+active+clean, 5 active+undersized+degraded, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 25/627 objects degraded (3.987%)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[97066]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[97066]: Health check failed: Degraded data redundancy: 25/627 objects degraded (3.987%), 5 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[98877]: pgmap v124: 161 pgs: 10 active+undersized, 16 peering, 5 stale+active+clean, 5 active+undersized+degraded, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 25/627 objects degraded (3.987%)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[98877]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[98877]: Health check failed: Degraded data redundancy: 25/627 objects degraded (3.987%), 5 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:17.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:17 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.273 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:16:18.072+0000 7f9418c24740 -1 osd.6 0 read_superblock omap replica is missing.
2026-03-09T20:16:18.273 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:16:18.088+0000 7f9418c24740 -1 osd.6 127 log_to_monitors true
2026-03-09T20:16:18.595 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-mon[81259]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.595 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-mon[81259]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.595 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.595 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.595 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:16:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:16:18.468+0000 7f94109cf640 -1 osd.6 127 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-09T20:16:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[97066]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[97066]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[98877]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[98877]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-09T20:16:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:18 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: pgmap v125: 161 pgs: 21 active+undersized, 16 peering, 2 stale+active+clean, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 49/627 objects degraded (7.815%)
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: osdmap e130: 8 total, 7 up, 8 in
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.719 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:19 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.835 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: pgmap v125: 161 pgs: 21 active+undersized, 16 peering, 2 stale+active+clean, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 49/627 objects degraded (7.815%)
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: osdmap e130: 8 total, 7 up, 8 in
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: pgmap v125: 161 pgs: 21 active+undersized, 16 peering, 2 stale+active+clean, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 49/627 objects degraded (7.815%)
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: osdmap e130: 8 total, 7 up, 8 in
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: from='osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:19.836 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:19 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:16:19.834+0000 7f1345fe3640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-09T20:16:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033] boot
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: osdmap e131: 8 total, 8 up, 8 in
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:20.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:20 vm06.local ceph-mon[81259]: osdmap e132: 8 total, 8 up, 8 in
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033] boot
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: osdmap e131: 8 total, 8 up, 8 in
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[97066]: osdmap e132: 8 total, 8 up, 8 in
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: osd.6 [v2:192.168.123.106:6816/2565579033,v1:192.168.123.106:6817/2565579033] boot
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: osdmap e131: 8 total, 8 up, 8 in
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-09T20:16:20.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:20.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:20 vm01.local ceph-mon[98877]: osdmap e132: 8 total, 8 up, 8 in
2026-03-09T20:16:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:21 vm06.local ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:21 vm06.local ceph-mon[81259]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-09T20:16:21.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:21 vm06.local ceph-mon[81259]: pgmap v128: 161 pgs: 25 active+undersized, 16 peering, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 51/627 objects degraded (8.134%)
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[97066]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[97066]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[97066]: pgmap v128: 161 pgs: 25 active+undersized, 16 peering, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 51/627 objects degraded (8.134%)
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[98877]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-09T20:16:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:21 vm01.local ceph-mon[98877]: pgmap v128: 161 pgs: 25 active+undersized, 16 peering, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 51/627 objects degraded (8.134%)
2026-03-09T20:16:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:22 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:22.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:22 vm06.local ceph-mon[81259]: pgmap v130: 161 pgs: 11 active+undersized, 16 peering, 7 active+undersized+degraded, 127 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%)
2026-03-09T20:16:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:22 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:22] "GET /metrics HTTP/1.1" 200 37763 "" "Prometheus/2.51.0"
2026-03-09T20:16:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:22 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:22.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:22 vm01.local ceph-mon[97066]: pgmap v130: 161 pgs: 11 active+undersized, 16 peering, 7 active+undersized+degraded, 127 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%)
2026-03-09T20:16:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:22 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:22.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:22 vm01.local ceph-mon[98877]: pgmap v130: 161 pgs: 11 active+undersized, 16 peering, 7 active+undersized+degraded, 127 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 16/627 objects degraded (2.552%)
2026-03-09T20:16:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:23 vm06.local ceph-mon[81259]: Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:23.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:23 vm01.local ceph-mon[97066]: Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:23.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:23 vm01.local ceph-mon[98877]: Health check update: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded (PG_DEGRADED)
2026-03-09T20:16:24.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:24 vm06.local ceph-mon[81259]: pgmap v131: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:24.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:24 vm01.local ceph-mon[97066]: pgmap v131: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:24.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:24 vm01.local ceph-mon[98877]: pgmap v131: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:25 vm06.local ceph-mon[81259]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 2 pgs peering)
2026-03-09T20:16:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:25 vm06.local ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded)
2026-03-09T20:16:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:25 vm06.local ceph-mon[81259]: Cluster is now healthy
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[97066]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 2 pgs peering)
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded)
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[97066]: Cluster is now healthy
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[98877]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 2 pgs peering)
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/627 objects degraded (2.552%), 7 pgs degraded)
2026-03-09T20:16:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:25 vm01.local ceph-mon[98877]: Cluster is now healthy
2026-03-09T20:16:26.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:26 vm06.local ceph-mon[81259]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:26.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:26 vm01.local ceph-mon[97066]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:26.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:26 vm01.local ceph-mon[98877]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail
2026-03-09T20:16:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:27.008Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:16:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:27.009Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:16:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:29 vm06.local ceph-mon[81259]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 354 B/s rd, 0 op/s
2026-03-09T20:16:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:29 vm01.local ceph-mon[97066]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 354 B/s rd, 0 op/s
2026-03-09T20:16:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:29 vm01.local ceph-mon[98877]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 354 B/s rd, 0 op/s
2026-03-09T20:16:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:31 vm06.local ceph-mon[81259]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 0 op/s
2026-03-09T20:16:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:31 vm01.local ceph-mon[97066]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 0 op/s
2026-03-09T20:16:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:31 vm01.local ceph-mon[98877]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 0 op/s
2026-03-09T20:16:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:32 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:32] "GET /metrics HTTP/1.1" 200 37763 "" "Prometheus/2.51.0"
2026-03-09T20:16:33.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:33 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:33.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:33 vm01.local ceph-mon[97066]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 0 op/s
2026-03-09T20:16:33.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:33 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:33.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:33 vm01.local ceph-mon[98877]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 0 op/s
2026-03-09T20:16:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:33 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:33.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:33 vm06.local ceph-mon[81259]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 0 op/s
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:35.602 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:16:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:36.343 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:35 vm06.local systemd[1]: Stopping Ceph osd.7 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:16:36.343 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T20:16:35.962+0000 7fdeade5d700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:16:36.343 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T20:16:35.962+0000 7fdeade5d700 -1 osd.7 132 *** Got signal Terminated *** 2026-03-09T20:16:36.343 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:35 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[59764]: 2026-03-09T20:16:35.962+0000 7fdeade5d700 -1 osd.7 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:16:36.620 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:36 vm06.local ceph-mon[81259]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T20:16:36.620 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:36 vm06.local ceph-mon[81259]: Upgrade: osd.7 is safe to restart 2026-03-09T20:16:36.620 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:36 vm06.local ceph-mon[81259]: Upgrade: Updating osd.7 2026-03-09T20:16:36.620 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:36 vm06.local ceph-mon[81259]: Deploying daemon osd.7 on vm06 2026-03-09T20:16:36.620 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:36 vm06.local ceph-mon[81259]: osd.7 marked itself down and dead 2026-03-09T20:16:36.620 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100663]: 2026-03-09 20:16:36.436340795 +0000 UTC m=+0.486893406 container died ff2fce5fbde2f0f1340755d95ecb0c7e988dbc9252fbe8c2d4ea0f70a92e5cb1 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, vcs-type=git, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, name=centos-stream, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., architecture=x86_64, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, release=754, CEPH_POINT_RELEASE=-17.2.0, version=8, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, RELEASE=HEAD) 2026-03-09T20:16:36.620 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100663]: 2026-03-09 20:16:36.456085731 +0000 UTC m=+0.506638352 container remove ff2fce5fbde2f0f1340755d95ecb0c7e988dbc9252fbe8c2d4ea0f70a92e5cb1 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, vcs-type=git, maintainer=Guillaume Abrioux , com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, name=centos-stream, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, com.redhat.component=centos-stream-container, io.openshift.tags=base centos centos-stream, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc.) 2026-03-09T20:16:36.620 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local bash[100663]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[97066]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[97066]: Upgrade: osd.7 is safe to restart 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[97066]: Upgrade: Updating osd.7 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[97066]: Deploying daemon osd.7 on vm06 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[97066]: osd.7 marked itself down and dead 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[98877]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[98877]: Upgrade: osd.7 is safe to restart 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[98877]: Upgrade: Updating osd.7 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[98877]: Deploying daemon osd.7 on vm06 2026-03-09T20:16:36.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:36 vm01.local ceph-mon[98877]: osd.7 marked itself down and dead 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.620200022 +0000 UTC m=+0.021912723 container create 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.667313375 +0000 UTC m=+0.069026086 container init 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.671413239 +0000 UTC 
m=+0.073125940 container start 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.675970919 +0000 UTC m=+0.077683620 container attach 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.610839814 +0000 UTC m=+0.012552524 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.813657546 +0000 UTC m=+0.215370247 container died 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local podman[100730]: 2026-03-09 20:16:36.832669641 +0000 UTC m=+0.234382342 container remove 391233c5e9338e526ca6cf55afc3336bbc3e9a484aebb5d1d5c22b24c1465ab6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-deactivate, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.7.service: Deactivated successfully. 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local systemd[1]: Stopped Ceph osd.7 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:16:36.922 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:36 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.7.service: Consumed 8.146s CPU time. 2026-03-09T20:16:37.279 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local systemd[1]: Starting Ceph osd.7 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:16:37.279 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local podman[100831]: 2026-03-09 20:16:37.134296524 +0000 UTC m=+0.019345421 container create 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-09T20:16:37.279 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local podman[100831]: 2026-03-09 20:16:37.178801924 +0000 UTC m=+0.063850831 container init 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:16:37.279 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local podman[100831]: 2026-03-09 
20:16:37.181633534 +0000 UTC m=+0.066682431 container start 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:16:37.280 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local podman[100831]: 2026-03-09 20:16:37.188136425 +0000 UTC m=+0.073185322 container attach 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:16:37.280 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local podman[100831]: 2026-03-09 20:16:37.126901463 +0000 UTC m=+0.011950369 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:16:37.280 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:37.280 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:37.280 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:37.009Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:37.010Z caller=notify.go:732 level=warn 
component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:37.592 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-mon[81259]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-mon[81259]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:16:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-mon[81259]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T20:16:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-mon[81259]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T20:16:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[97066]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[97066]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:16:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[97066]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T20:16:37.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[97066]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T20:16:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[98877]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[98877]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T20:16:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[98877]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T20:16:37.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:37 vm01.local ceph-mon[98877]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5047c633-70af-4c80-b4a8-102161383545/osd-block-0e4d7562-913f-4157-b012-dd8565afc290 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T20:16:38.093 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:37 vm06.local bash[100831]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5047c633-70af-4c80-b4a8-102161383545/osd-block-0e4d7562-913f-4157-b012-dd8565afc290 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T20:16:38.366 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/ln -snf /dev/ceph-5047c633-70af-4c80-b4a8-102161383545/osd-block-0e4d7562-913f-4157-b012-dd8565afc290 /var/lib/ceph/osd/ceph-7/block 2026-03-09T20:16:38.366 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[100831]: Running command: /usr/bin/ln -snf /dev/ceph-5047c633-70af-4c80-b4a8-102161383545/osd-block-0e4d7562-913f-4157-b012-dd8565afc290 /var/lib/ceph/osd/ceph-7/block 2026-03-09T20:16:38.366 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[100831]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[100831]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[100831]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate[100842]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[100831]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T20:16:38.367 
INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101037]: 2026-03-09 20:16:38.173067714 +0000 UTC m=+0.015238793 container died 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3) 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101037]: 2026-03-09 20:16:38.191479927 +0000 UTC m=+0.033650995 container remove 3fa11475a42579ec1e12cb70dff90d219c08e8e90e26f1961fdb1cdc2906f313 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS) 2026-03-09T20:16:38.367 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101076]: 2026-03-09 20:16:38.332251437 +0000 UTC m=+0.040616212 container create 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:16:38.731 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:38 vm06.local ceph-mon[81259]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T20:16:38.731 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101076]: 2026-03-09 20:16:38.37279339 +0000 UTC m=+0.081158155 container init 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, org.label-schema.license=GPLv2, 
org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True) 2026-03-09T20:16:38.731 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101076]: 2026-03-09 20:16:38.3758711 +0000 UTC m=+0.084235875 container start 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T20:16:38.731 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local bash[101076]: 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f 2026-03-09T20:16:38.731 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local podman[101076]: 2026-03-09 20:16:38.318310343 +0000 UTC m=+0.026675118 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:16:38.731 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:38 vm06.local systemd[1]: Started Ceph osd.7 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 
2026-03-09T20:16:38.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:38 vm01.local ceph-mon[97066]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T20:16:38.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:38 vm01.local ceph-mon[98877]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T20:16:39.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:16:39.208+0000 7f4c775af740 -1 Falling back to public interface 2026-03-09T20:16:39.618 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-mon[81259]: pgmap v140: 161 pgs: 7 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 128 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 19/627 objects degraded (3.030%) 2026-03-09T20:16:39.618 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-mon[81259]: Health check failed: Degraded data redundancy: 19/627 objects degraded (3.030%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:39.618 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.618 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.622 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:16:39.586+0000 7f4c775af740 -1 osd.7 0 read_superblock omap replica is missing. 2026-03-09T20:16:39.622 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:39 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:16:39.614+0000 7f4c775af740 -1 osd.7 132 log_to_monitors true 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[98877]: pgmap v140: 161 pgs: 7 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 128 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 19/627 objects degraded (3.030%) 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[98877]: Health check failed: Degraded data redundancy: 19/627 objects degraded (3.030%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[97066]: pgmap v140: 161 pgs: 7 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 128 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 19/627 objects degraded (3.030%) 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[97066]: Health check failed: Degraded data redundancy: 19/627 objects degraded (3.030%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:39 vm01.local ceph-mon[97066]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:39.903 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:16:40.337 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:16:40.337 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (16m) 88s ago 23m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:16:40.337 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (4m) 0s ago 23m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:16:40.337 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (4m) 88s ago 23m 52.5M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:16:40.337 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (4m) 0s ago 24m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (13m) 88s ago 25m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (3m) 88s ago 25m 53.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (4m) 0s ago 25m 49.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (3m) 88s ago 25m 42.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (16m) 88s ago 23m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (16m) 0s ago 23m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (2m) 88s ago 24m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (2m) 88s ago 24m 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (2m) 88s ago 24m 44.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (89s) 88s ago 24m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (67s) 0s ago 24m 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (45s) 0s ago 24m 46.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (23s) 0s ago 23m 44.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (2s) 0s ago 23m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (4m) 0s ago 23m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:16:40.338 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (23m) 88s ago 23m 96.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:16:40.338 
INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (23m) 0s ago 23m 96.4M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:16:40.615 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[97066]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T20:16:40.615 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.615 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.615 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.615 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.616 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[98877]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T20:16:40.616 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.616 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.616 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.616 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:40 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2,
2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 12
2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-03-09T20:16:40.616 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:16:40.836 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:16:40.836 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true,
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons of type(s) crash,osd",
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "osd"
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: ],
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "8/8 daemons upgraded",
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading osd daemons",
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false
2026-03-09T20:16:40.837 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:16:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-mon[81259]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch
2026-03-09T20:16:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:40.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:40.842 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:16:40 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:16:40.437+0000 7f4c6f35a640 -1 osd.7 132 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='client.54355 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: pgmap v141: 161 pgs: 28 active+undersized, 5 stale+active+clean, 18 active+undersized+degraded, 110 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 62/627 objects degraded (9.888%)
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='client.54364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: osdmap e135: 8 total, 7 up, 8 in
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/2920401690' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished
2026-03-09T20:16:41.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:41 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='client.54355 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: pgmap v141: 161 pgs: 28 active+undersized, 5 stale+active+clean, 18 active+undersized+degraded, 110 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 62/627 objects degraded (9.888%)
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='client.54364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: osdmap e135: 8 total, 7 up, 8 in
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/2920401690' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='client.54355 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: pgmap v141: 161 pgs: 28 active+undersized, 5 stale+active+clean, 18 active+undersized+degraded, 110 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 62/627 objects degraded (9.888%)
2026-03-09T20:16:41.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='client.54364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: osdmap e135: 8 total, 7 up, 8 in
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/2920401690' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished
2026-03-09T20:16:41.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:41 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch
2026-03-09T20:16:42.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='client.34379 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:42.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all osd
2026-03-09T20:16:42.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: Upgrade: Setting require_osd_release to 19 squid
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid)
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081] boot
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: osdmap e136: 8 total, 8 up, 8 in
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mds
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished
2026-03-09T20:16:42.844 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:42 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='client.34379 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all osd
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: Upgrade: Setting require_osd_release to 19 squid
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid)
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081] boot
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: osdmap e136: 8 total, 8 up, 8 in
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mds
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:42] "GET /metrics HTTP/1.1" 200 37909 "" "Prometheus/2.51.0"
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='client.34379 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all osd
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: Upgrade: Setting require_osd_release to 19 squid
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid)
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: osd.7 [v2:192.168.123.106:6824/1544000081,v1:192.168.123.106:6825/1544000081] boot
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: osdmap e136: 8 total, 8 up, 8 in
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mds
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished
2026-03-09T20:16:42.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:42 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all rbd-mirror
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all ceph-exporter
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all nfs
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Finalizing container_image settings
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: pgmap v144: 161 pgs: 40 active+undersized, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 80/627 objects degraded (12.759%)
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: osdmap e137: 8 total, 8 up, 8 in
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: Upgrade: Complete!
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T20:16:43.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:43 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all rbd-mirror
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all ceph-exporter
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all nfs
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all nvmeof
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Finalizing container_image settings
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: pgmap v144: 161 pgs: 40 active+undersized, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 80/627 objects degraded (12.759%)
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: osdmap e137: 8 total, 8 up, 8 in
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch
2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: Upgrade: Complete!
2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all nfs 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Finalizing container_image settings 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: pgmap v144: 161 pgs: 40 active+undersized, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 80/627 objects degraded (12.759%) 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 
2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: osdmap e137: 8 total, 8 up, 8 in 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: Upgrade: Complete! 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:16:43.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:43 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:44.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:44 vm06.local ceph-mon[81259]: Health check update: Degraded data redundancy: 58/627 objects degraded (9.250%), 17 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:44.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:44 vm01.local ceph-mon[97066]: Health check update: Degraded data redundancy: 58/627 objects degraded (9.250%), 17 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:44.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:44 vm01.local ceph-mon[98877]: Health check update: Degraded data redundancy: 58/627 objects degraded (9.250%), 17 pgs degraded (PG_DEGRADED) 2026-03-09T20:16:45.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:45 vm06.local ceph-mon[81259]: pgmap v146: 161 pgs: 22 active+undersized, 17 active+undersized+degraded, 122 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 58/627 objects degraded (9.250%) 
2026-03-09T20:16:45.772 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:45 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:45 vm01.local ceph-mon[97066]: pgmap v146: 161 pgs: 22 active+undersized, 17 active+undersized+degraded, 122 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 58/627 objects degraded (9.250%) 2026-03-09T20:16:45.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:45 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:45 vm01.local ceph-mon[98877]: pgmap v146: 161 pgs: 22 active+undersized, 17 active+undersized+degraded, 122 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 58/627 objects degraded (9.250%) 2026-03-09T20:16:45.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:45 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:47.009Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:47.010Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:47 vm06.local ceph-mon[81259]: pgmap v147: 161 pgs: 7 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 7/627 objects degraded (1.116%) 2026-03-09T20:16:47.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:47 vm01.local ceph-mon[97066]: pgmap v147: 161 pgs: 7 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 7/627 objects degraded (1.116%) 2026-03-09T20:16:47.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:47 vm01.local ceph-mon[98877]: pgmap v147: 161 pgs: 7 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 7/627 objects degraded (1.116%) 2026-03-09T20:16:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:48 vm06.local ceph-mon[81259]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:16:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:48 vm06.local ceph-mon[81259]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 7/627 objects degraded (1.116%), 4 pgs degraded) 2026-03-09T20:16:49.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:48 vm06.local ceph-mon[81259]: Cluster 
is now healthy 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[97066]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[97066]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 7/627 objects degraded (1.116%), 4 pgs degraded) 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[97066]: Cluster is now healthy 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[98877]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[98877]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 7/627 objects degraded (1.116%), 4 pgs degraded) 2026-03-09T20:16:49.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:48 vm01.local ceph-mon[98877]: Cluster is now healthy 2026-03-09T20:16:50.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:50 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:50.427 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:50 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:50 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:16:51.493 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:51 vm06.local ceph-mon[81259]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:16:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:51 vm01.local ceph-mon[97066]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:16:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:51 vm01.local ceph-mon[98877]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T20:16:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:16:52 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:16:52] "GET /metrics HTTP/1.1" 200 37945 "" "Prometheus/2.51.0" 2026-03-09T20:16:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:53 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:53 vm01.local ceph-mon[97066]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:16:53.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:53 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:53.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:53 vm01.local 
ceph-mon[98877]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:16:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:53 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:16:53.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:53 vm06.local ceph-mon[81259]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T20:16:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:55 vm01.local ceph-mon[97066]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:16:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:55 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:55 vm01.local ceph-mon[98877]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:16:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:55 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:55 vm06.local ceph-mon[81259]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:16:55.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:55 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:16:57.377 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:57.010Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:57.377 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:16:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:16:57.011Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:16:57.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:57 vm01.local ceph-mon[97066]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:57.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:57 vm01.local ceph-mon[98877]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:57.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:57 vm06.local ceph-mon[81259]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:16:59.786 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:16:59 vm01.local ceph-mon[97066]: pgmap v153: 161 pgs: 161 
active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:16:59.786 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:16:59 vm01.local ceph-mon[98877]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:16:59.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:16:59 vm06.local ceph-mon[81259]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:01.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:01 vm06.local ceph-mon[81259]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:01.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:01 vm01.local ceph-mon[97066]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:01.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:01 vm01.local ceph-mon[98877]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:02 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:02] "GET /metrics HTTP/1.1" 200 37945 "" "Prometheus/2.51.0" 2026-03-09T20:17:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:03 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:03.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:03 vm06.local ceph-mon[81259]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:03.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:03 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:03.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:03 vm01.local ceph-mon[97066]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:03.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:03 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:03.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:03 vm01.local ceph-mon[98877]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:05 vm06.local ceph-mon[81259]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:05.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:05 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:05.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:05 vm01.local ceph-mon[97066]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:05.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:05 vm01.local 
ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:05.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:05 vm01.local ceph-mon[98877]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:05.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:05 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:07.011Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:07.011Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:07 vm06.local ceph-mon[81259]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:07.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:07 vm01.local ceph-mon[97066]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:07.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:07 vm01.local ceph-mon[98877]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:09 vm06.local ceph-mon[81259]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:09.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:09 vm01.local ceph-mon[97066]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:09.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:09 vm01.local ceph-mon[98877]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:11.129 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:17:11.310 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:11 vm01.local ceph-mon[97066]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:11.310 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:11 vm01.local 
ceph-mon[98877]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:11.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:11 vm06.local ceph-mon[81259]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (17m) 119s ago 23m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (4m) 31s ago 23m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (5m) 119s ago 23m 52.5M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (5m) 31s ago 25m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (13m) 119s ago 26m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (3m) 119s ago 26m 53.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (4m) 31s ago 25m 49.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (3m) 119s ago 25m 42.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (17m) 119s ago 24m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (17m) 31s ago 23m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (3m) 119s ago 25m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (2m) 119s ago 25m 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (2m) 119s ago 25m 44.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (2m) 119s ago 24m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (98s) 31s ago 24m 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (76s) 31s ago 24m 46.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (55s) 31s ago 24m 44.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (33s) 31s ago 24m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 31s ago 23m 52.9M - 2.51.0 
1d3b7f56885b 865f54718b3f 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (23m) 119s ago 23m 96.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:17:11.640 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (23m) 31s ago 23m 96.4M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:17:11.703 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 1'"'"'' 2026-03-09T20:17:12.308 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:12 vm01.local ceph-mon[97066]: from='client.54382 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:12.308 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:12 vm01.local ceph-mon[98877]: from='client.54382 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:12.308 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:17:12.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:12 vm06.local ceph-mon[81259]: from='client.54382 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:12.366 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | keys'"'"' | grep $sha1' 2026-03-09T20:17:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:12 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:12] "GET /metrics HTTP/1.1" 200 37938 "" "Prometheus/2.51.0" 2026-03-09T20:17:12.902 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T20:17:12.935 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[97066]: from='client.54388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[97066]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[97066]: from='client.? 
192.168.123.101:0/2441276346' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/2497520483' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[98877]: from='client.54388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[98877]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/2441276346' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.168 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:13 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/2497520483' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:13 vm06.local ceph-mon[81259]: from='client.54388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:13 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:13 vm06.local ceph-mon[81259]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:13 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/2441276346' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:13 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/2497520483' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:17:13.519 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:17:13.570 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:17:14.091 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:17:14.164 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo' 2026-03-09T20:17:14.344 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:14 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/882363641' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:14.346 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:14 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/882363641' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:14.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:14 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/882363641' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:15.004 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:15.053 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:15.183 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:15.434 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:15 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 vm06.local ceph-mon[81259]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 vm06.local ceph-mon[81259]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 
vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:15.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:15 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:15.646 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (17m) 2m ago 23m 24.2M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (4m) 36s ago 23m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (5m) 2m ago 23m 52.5M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (5m) 36s ago 25m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (13m) 2m ago 26m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (4m) 2m ago 26m 53.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (4m) 36s ago 25m 49.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (3m) 2m ago 25m 42.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (17m) 2m ago 24m 10.4M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (17m) 36s ago 24m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:17:16.132 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (3m) 2m ago 25m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (2m) 2m ago 25m 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (2m) 2m ago 25m 44.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (2m) 2m ago 24m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (103s) 36s ago 24m 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (81s) 36s ago 24m 46.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (59s) 36s ago 24m 44.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (37s) 36s ago 24m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 
36s ago 23m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (23m) 2m ago 23m 96.8M - 17.2.0 e1d6a67b021e 86ff044460d1 2026-03-09T20:17:16.133 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (23m) 36s ago 23m 96.4M - 17.2.0 e1d6a67b021e ec6c97005ac6 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 13 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:17:16.427 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='client.54412 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests 
['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:16.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 
vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3102964595' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='client.54412 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:16.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:16 vm01.local ceph-mon[97066]: from='client.? 
192.168.123.101:0/3102964595' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true, 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading daemons in service(s) rgw.foo", 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "0/2 daemons upgraded", 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading rgw daemons", 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:17:16.646 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='client.54412 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 
vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.zyuelz", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:16 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/3102964595' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:17.222 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:17.011Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:17.222 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:17.012Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: from='client.54418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: Upgrade: Updating rgw.foo.vm01.zyuelz (1/2) 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: from='client.54421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: from='client.54418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: Upgrade: Updating rgw.foo.vm01.zyuelz (1/2) 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: from='client.54421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 
267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:17.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:17 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: from='client.54418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: Upgrade: Updating rgw.foo.vm01.zyuelz (1/2) 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: Deploying daemon rgw.foo.vm01.zyuelz on vm01 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: from='client.54421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:17.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:17 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:18.658 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:18 vm06.local ceph-mon[81259]: from='client.34418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:18.658 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:18 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:18.658 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:18 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:18.658 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:18 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[97066]: from='client.34418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:18.878 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[98877]: from='client.34418 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.lmiczb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T20:17:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:18 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[97066]: Upgrade: Updating rgw.foo.vm06.lmiczb (2/2) 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[97066]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[97066]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 0 B/s wr, 15 op/s 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[98877]: Upgrade: Updating rgw.foo.vm06.lmiczb (2/2) 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[98877]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[98877]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 0 B/s wr, 15 op/s 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:19.781 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:19 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:19.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:19 vm06.local ceph-mon[81259]: Upgrade: Updating rgw.foo.vm06.lmiczb (2/2) 2026-03-09T20:17:19.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:19 vm06.local ceph-mon[81259]: Deploying daemon rgw.foo.vm06.lmiczb on vm06 
2026-03-09T20:17:19.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:19 vm06.local ceph-mon[81259]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 267 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 0 B/s wr, 15 op/s 2026-03-09T20:17:19.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:19 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:19.843 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:19 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:20.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.547 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:21 vm06.local ceph-mon[81259]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 53 op/s 2026-03-09T20:17:21.547 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:21 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:21 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:21 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.846 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:21 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[97066]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 53 op/s 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[98877]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 53 op/s 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:21.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:21 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:22] "GET /metrics HTTP/1.1" 200 37939 "" "Prometheus/2.51.0" 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all nfs 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Finalizing container_image settings 2026-03-09T20:17:22.879 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 
2026-03-09T20:17:22.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Upgrade: Complete! 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: Checking dashboard <-> RGW credentials 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 170 B/s wr, 136 op/s 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]': finished 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:22.880 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:22.880 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all nfs 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Finalizing container_image settings 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "mgr"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", 
"name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Upgrade: Complete! 
2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: Checking dashboard <-> RGW credentials 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 170 B/s wr, 136 op/s 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:22.881 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:22 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:23.092 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]: dispatch 2026-03-09T20:17:23.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm01.zyuelz"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm06.lmiczb"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all nfs 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all nvmeof 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Finalizing container_image settings 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local 
ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 
vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Upgrade: Complete! 
2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T20:17:23.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: Checking dashboard <-> RGW credentials 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 170 B/s wr, 136 op/s 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:23.094 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:22 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:25.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:25 vm06.local ceph-mon[81259]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:25.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:25 vm01.local ceph-mon[97066]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:25.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:25 vm01.local ceph-mon[98877]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:26.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:26 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:26.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:26 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:26.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:26 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' 
entity='mgr.y' 2026-03-09T20:17:27.301 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:27.012Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:27.301 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:27.013Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:27 vm06.local ceph-mon[81259]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:27.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:27 vm01.local ceph-mon[97066]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:27.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:27 vm01.local ceph-mon[98877]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 169 op/s 2026-03-09T20:17:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:29 vm06.local ceph-mon[81259]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-09T20:17:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:29 vm01.local ceph-mon[97066]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-09T20:17:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:29 vm01.local ceph-mon[98877]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-09T20:17:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:31 vm06.local ceph-mon[81259]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 170 B/s wr, 155 op/s 2026-03-09T20:17:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:31 vm01.local ceph-mon[97066]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 170 B/s wr, 155 op/s 2026-03-09T20:17:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:31 vm01.local ceph-mon[98877]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 170 B/s wr, 155 op/s 2026-03-09T20:17:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:32 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:32] "GET /metrics HTTP/1.1" 200 37939 "" "Prometheus/2.51.0" 2026-03-09T20:17:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:33 vm06.local ceph-mon[81259]: from='client.15186 -' 
entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:33 vm06.local ceph-mon[81259]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 76 KiB/s rd, 170 B/s wr, 116 op/s 2026-03-09T20:17:33.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:33 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:33.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:33 vm01.local ceph-mon[97066]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 76 KiB/s rd, 170 B/s wr, 116 op/s 2026-03-09T20:17:33.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:33 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:33.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:33 vm01.local ceph-mon[98877]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 76 KiB/s rd, 170 B/s wr, 116 op/s 2026-03-09T20:17:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:35 vm01.local ceph-mon[97066]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 34 op/s 2026-03-09T20:17:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:35 vm01.local ceph-mon[98877]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 34 op/s 2026-03-09T20:17:35.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:35.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:35 vm06.local ceph-mon[81259]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 34 op/s 2026-03-09T20:17:35.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:37.375 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:37.013Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:37.375 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:37.014Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:37.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:37 vm01.local ceph-mon[97066]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 2 op/s 2026-03-09T20:17:37.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:37 vm01.local ceph-mon[98877]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 2 op/s 2026-03-09T20:17:37.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:37 vm06.local ceph-mon[81259]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 2 op/s 2026-03-09T20:17:39.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:39 vm06.local ceph-mon[81259]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:17:39.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:39 vm01.local ceph-mon[97066]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:17:39.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:39 vm01.local ceph-mon[98877]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:17:41.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:41 vm06.local ceph-mon[81259]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:41.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:41 vm01.local ceph-mon[97066]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:41.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:41 vm01.local ceph-mon[98877]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:42 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:42] "GET /metrics HTTP/1.1" 200 37974 "" "Prometheus/2.51.0" 2026-03-09T20:17:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:42 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:42.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:42 vm01.local ceph-mon[97066]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:42 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:42.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:42 vm01.local ceph-mon[98877]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:43.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:42 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:43.092 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:42 vm06.local ceph-mon[81259]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:45 vm06.local ceph-mon[81259]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:45 vm01.local ceph-mon[97066]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:45.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:45 vm01.local ceph-mon[98877]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:47.154 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:47.014Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:47.154 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:47.014Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:47.181 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:17:47.486 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:47 vm01.local ceph-mon[97066]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:47.487 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:47 vm01.local ceph-mon[98877]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:47.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:47 vm06.local ceph-mon[81259]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (17m) 27s ago 24m 26.3M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (5m) 27s ago 24m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (5m) 27s ago 24m 52.7M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 
*:8443,9283,8765 running (5m) 27s ago 26m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (14m) 27s ago 26m 565M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (4m) 27s ago 26m 61.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (5m) 27s ago 26m 50.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:17:47.926 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (4m) 27s ago 26m 46.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (17m) 27s ago 24m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (17m) 27s ago 24m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (3m) 27s ago 25m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (3m) 27s ago 25m 56.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (3m) 27s ago 25m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (2m) 27s ago 25m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (2m) 27s ago 25m 52.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (112s) 27s ago 25m 48.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (91s) 27s ago 25m 46.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (69s) 27s ago 24m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 27s ago 24m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (30s) 27s ago 24m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 67b62e7f71bd 2026-03-09T20:17:47.927 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (28s) 27s ago 24m 95.2M - 19.2.3-678-ge911bdeb 654f31e6858e c750ea62cf38 2026-03-09T20:17:47.978 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.rgw | length == 1'"'"'' 2026-03-09T20:17:48.593 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:17:48.593 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:48 vm01.local ceph-mon[97066]: from='client.54502 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 
2026-03-09T20:17:48.594 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:48 vm01.local ceph-mon[98877]: from='client.54502 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:48.635 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.rgw | keys'"'"' | grep $sha1' 2026-03-09T20:17:48.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:48 vm06.local ceph-mon[81259]: from='client.54502 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:49.754 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[97066]: from='client.34478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:49.754 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[97066]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:49.754 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3616628557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:49.755 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[98877]: from='client.34478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:49.755 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[98877]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:49.755 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:49 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3616628557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:49 vm06.local ceph-mon[81259]: from='client.34478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:49 vm06.local ceph-mon[81259]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:49.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:49 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/3616628557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:49.923 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T20:17:49.975 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T20:17:50.513 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": null, 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": false, 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "which": "", 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [], 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "progress": null, 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "message": "", 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false 2026-03-09T20:17:50.514 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:17:50.593 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3608372819' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[97066]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[97066]: from='client.44517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3608372819' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[98877]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:50.768 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:50 vm01.local ceph-mon[98877]: from='client.44517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:50 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/3608372819' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:50 vm06.local ceph-mon[81259]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:50 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:17:51.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:50 vm06.local ceph-mon[81259]: from='client.44517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:51.130 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-03-09T20:17:51.198 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-09T20:17:51.729 INFO:teuthology.orchestra.run.vm01.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:51.730 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:51 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/4113491437' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:51.731 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:51 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/4113491437' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:51.791 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T20:17:51.794 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm01.local 2026-03-09T20:17:51.794 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done' 2026-03-09T20:17:51.993 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:51 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/4113491437' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:52.393 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:17:52.665 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:17:52] "GET /metrics HTTP/1.1" 200 37973 "" "Prometheus/2.51.0" 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (17m) 32s ago 24m 26.3M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (5m) 32s ago 24m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (5m) 32s ago 24m 52.7M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (5m) 32s ago 26m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (14m) 32s ago 26m 565M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (4m) 32s ago 26m 61.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (5m) 32s ago 26m 50.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (4m) 32s ago 26m 46.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (18m) 32s ago 24m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (17m) 32s ago 24m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (3m) 32s ago 25m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (3m) 32s ago 25m 56.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (3m) 32s ago 25m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (2m) 32s ago 25m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (2m) 32s ago 25m 52.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (117s) 32s ago 25m 48.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (96s) 32s ago 25m 46.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (74s) 32s ago 25m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:17:52.883 
INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (5m) 32s ago 24m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (35s) 32s ago 24m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 67b62e7f71bd 2026-03-09T20:17:52.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (33s) 32s ago 24m 95.2M - 19.2.3-678-ge911bdeb 654f31e6858e c750ea62cf38 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='client.54532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.930 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:52.930 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 
vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='client.54532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='client.15186 -' 
entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.931 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T20:17:52.932 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:52 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='client.54532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": 
"quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mgr 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mon 2026-03-09T20:17:53.092 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all crash 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all osd 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all mds 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all rgw 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T20:17:53.093 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y'
2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm01.zbymog", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-09T20:17:53.093 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:52 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "mon": {
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": {
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "osd": {
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": {
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "overall": {
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-03-09T20:17:53.192 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true,
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "osd",
2026-03-09T20:17:53.412 INFO:teuthology.orchestra.run.vm01.stdout: "rgw",
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: "mon",
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: "mgr"
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: ],
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "15/21 daemons upgraded",
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Currently upgrading iscsi daemons",
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": false
2026-03-09T20:17:53.413 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:17:53.661 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: from='client.44538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: Upgrade: Updating iscsi.foo.vm01.zbymog
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: Deploying daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: from='client.54550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/319913225' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: from='client.34517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:53 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/36074226' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: from='client.44538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: Upgrade: Updating iscsi.foo.vm01.zbymog
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: Deploying daemon iscsi.foo.vm01.zbymog on vm01
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: from='client.54550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/319913225' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: from='client.34517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[97066]: from='client.?
192.168.123.101:0/36074226' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:54.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: from='client.44538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: Upgrade: Updating iscsi.foo.vm01.zbymog 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: Deploying daemon iscsi.foo.vm01.zbymog on vm01 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: from='client.54550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/319913225' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: from='client.34517 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:17:54.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:53 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/36074226' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T20:17:55.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:54 vm06.local ceph-mon[81259]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:55.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:54 vm01.local ceph-mon[97066]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:55.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:54 vm01.local ceph-mon[98877]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:57.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:57 vm01.local ceph-mon[98877]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:57.015Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:57.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:17:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:17:57.016Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:17:57.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:57 vm01.local ceph-mon[97066]: pgmap v182: 161 pgs: 161 
active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:57.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:57 vm06.local ceph-mon[81259]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:17:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:17:59 vm06.local ceph-mon[81259]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:59.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:17:59 vm01.local ceph-mon[97066]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:17:59.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:17:59 vm01.local ceph-mon[98877]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:01.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:01 vm06.local ceph-mon[81259]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:01.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:01 vm01.local ceph-mon[97066]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:01.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:01 vm01.local ceph-mon[98877]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:02 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:18:02] "GET /metrics HTTP/1.1" 200 37973 "" "Prometheus/2.51.0" 2026-03-09T20:18:03.458 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:03 vm01.local ceph-mon[97066]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:03.458 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:03 vm01.local ceph-mon[97066]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:03.458 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:03 vm01.local ceph-mon[98877]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:03.458 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:03 vm01.local ceph-mon[98877]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:03 vm06.local ceph-mon[81259]: from='client.15186 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:03 vm06.local ceph-mon[81259]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:03.710 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:03 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:18:03.707+0000 7f1337f47640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. 
2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. 2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:18:05.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:05.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. 
2026-03-09T20:18:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T20:18:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:18:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:18:05.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:06.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:05 vm06.local ceph-mon[81259]: Health check failed: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T20:18:06.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:05 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:06.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:05 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:06.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[97066]: Health check failed: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T20:18:06.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:06.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:06.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[98877]: Health check failed: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-09T20:18:06.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:18:06.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:05 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:07.348 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:07.016Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:07.349 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:07.017Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:07.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:07 vm01.local ceph-mon[97066]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:07.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:07 vm01.local ceph-mon[98877]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:07.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:07 vm06.local ceph-mon[81259]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:09.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:09 vm01.local ceph-mon[97066]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:09.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:09 vm01.local ceph-mon[98877]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:09.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:09 vm06.local ceph-mon[81259]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 884 B/s rd, 0 op/s 2026-03-09T20:18:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:11 vm06.local ceph-mon[81259]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:11.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:11 vm01.local ceph-mon[97066]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:11.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:11 vm01.local ceph-mon[98877]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:12 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 
::ffff:192.168.123.106 - - [09/Mar/2026:20:18:12] "GET /metrics HTTP/1.1" 200 38053 "" "Prometheus/2.51.0" 2026-03-09T20:18:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:13 vm06.local ceph-mon[81259]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:13.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:13 vm01.local ceph-mon[97066]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:13.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:13 vm01.local ceph-mon[98877]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:14.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:14 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/4184447024' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:18:14.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:14 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/4184447024' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:18:14.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:14 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/4184447024' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T20:18:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:15 vm06.local ceph-mon[81259]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:15 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]: dispatch 2026-03-09T20:18:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:15 vm01.local ceph-mon[97066]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:15.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:15 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]: dispatch 2026-03-09T20:18:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:15 vm01.local ceph-mon[98877]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 442 B/s rd, 0 op/s 2026-03-09T20:18:15.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:15 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]: dispatch 2026-03-09T20:18:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:16 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]': finished 2026-03-09T20:18:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:16 vm06.local ceph-mon[81259]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T20:18:16.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:16 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]: dispatch 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]': finished 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[97066]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]: dispatch 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3089482871' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3821923821"}]': finished 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[98877]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T20:18:16.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:16 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]: dispatch 2026-03-09T20:18:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:17.017Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:17.018Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:17 vm06.local ceph-mon[81259]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:17 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]': finished 2026-03-09T20:18:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:17 vm06.local ceph-mon[81259]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T20:18:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:17 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]: dispatch 2026-03-09T20:18:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[97066]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]': finished 2026-03-09T20:18:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[97066]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T20:18:17.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]: dispatch 2026-03-09T20:18:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[98877]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/4140805914' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/31358619"}]': finished 2026-03-09T20:18:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[98877]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T20:18:17.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:17 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]: dispatch 2026-03-09T20:18:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:18 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]': finished 2026-03-09T20:18:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:18 vm06.local ceph-mon[81259]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T20:18:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:18 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/2019142570' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:18.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:18 vm06.local ceph-mon[81259]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]': finished 2026-03-09T20:18:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[97066]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T20:18:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/2019142570' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:18.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[97066]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:18.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/447795285' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3715103597"}]': finished 2026-03-09T20:18:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[98877]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T20:18:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/2019142570' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:18.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:18 vm01.local ceph-mon[98877]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]: dispatch 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[97066]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[97066]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]': finished 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[97066]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/2703981390' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[97066]: from='client.? 
' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[98877]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[98877]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]': finished 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[98877]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/2703981390' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:19.935 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:19 vm01.local ceph-mon[98877]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:19 vm06.local ceph-mon[81259]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-09T20:18:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:19 vm06.local ceph-mon[81259]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6801/1885779994"}]': finished 2026-03-09T20:18:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:19 vm06.local ceph-mon[81259]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T20:18:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:19 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/2703981390' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:20.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:19 vm06.local ceph-mon[81259]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]: dispatch 2026-03-09T20:18:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:20 vm06.local ceph-mon[81259]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 971 B/s rd, 1 op/s 2026-03-09T20:18:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:20 vm06.local ceph-mon[81259]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]': finished 2026-03-09T20:18:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:20 vm06.local ceph-mon[81259]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T20:18:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:20 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]: dispatch 2026-03-09T20:18:21.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[97066]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 971 B/s rd, 1 op/s 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[97066]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]': finished 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[97066]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]: dispatch 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[98877]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 971 B/s rd, 1 op/s 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[98877]: from='client.? ' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:0/3584432420"}]': finished 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[98877]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]: dispatch 2026-03-09T20:18:21.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:21 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]': finished 2026-03-09T20:18:22.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:21 vm06.local ceph-mon[81259]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T20:18:22.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:21 vm01.local ceph-mon[97066]: from='client.? 
192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]': finished 2026-03-09T20:18:22.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:21 vm01.local ceph-mon[97066]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T20:18:22.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:21 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/834808282' entity='client.iscsi.foo.vm01.zbymog' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.101:6800/1885779994"}]': finished 2026-03-09T20:18:22.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:21 vm01.local ceph-mon[98877]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T20:18:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:22 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:18:22] "GET /metrics HTTP/1.1" 200 38053 "" "Prometheus/2.51.0" 2026-03-09T20:18:23.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:22 vm06.local ceph-mon[81259]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 972 B/s rd, 1 op/s 2026-03-09T20:18:23.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:22 vm01.local ceph-mon[97066]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 972 B/s rd, 1 op/s 2026-03-09T20:18:23.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:22 vm01.local ceph-mon[98877]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 972 B/s rd, 1 op/s 2026-03-09T20:18:23.877 INFO:teuthology.orchestra.run.vm01.stdout:true 2026-03-09T20:18:24.084 INFO:teuthology.orchestra.run.vm01.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed." 
2026-03-09T20:18:24.124 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (18m) 64s ago 25m 26.3M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (6m) 64s ago 24m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (6m) 64s ago 24m 52.7M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (6m) 64s ago 26m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:18:24.626 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (14m) 64s ago 27m 565M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (5m) 64s ago 27m 61.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (5m) 64s ago 26m 50.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (4m) 64s ago 26m 46.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (18m) 64s ago 25m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (18m) 64s ago 25m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (4m) 64s ago 26m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (4m) 64s ago 26m 56.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (3m) 64s ago 26m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (3m) 64s ago 26m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (2m) 64s ago 25m 52.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (2m) 64s ago 25m 48.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (2m) 64s ago 25m 46.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (106s) 64s ago 25m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (6m) 64s ago 25m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 
2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (67s) 64s ago 24m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 67b62e7f71bd 2026-03-09T20:18:24.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (65s) 64s ago 24m 95.2M - 19.2.3-678-ge911bdeb 654f31e6858e c750ea62cf38 2026-03-09T20:18:25.000 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T20:18:25.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[97066]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T20:18:25.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[97066]: from='client.44604 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[97066]: from='client.54610 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.052 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:25.053 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[98877]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T20:18:25.053 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[98877]: from='client.44604 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.053 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[98877]: from='client.54610 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.053 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:25 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:25 vm06.local ceph-mon[81259]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T20:18:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:25 vm06.local ceph-mon[81259]: from='client.44604 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:25 vm06.local ceph-mon[81259]: from='client.54610 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:25.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:25 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 
2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "overall": { 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-03-09T20:18:25.577 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-09T20:18:25.634 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"' 2026-03-09T20:18:25.913 INFO:teuthology.orchestra.run.vm01.stdout:wait for servicemap items w/ changing names to refresh 2026-03-09T20:18:25.955 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60' 2026-03-09T20:18:26.119 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:26 vm01.local ceph-mon[97066]: from='client.54613 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:26.119 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:26 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/835619183' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:18:26.119 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:26 vm01.local ceph-mon[98877]: from='client.54613 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:26.119 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:26 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/835619183' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:18:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:26 vm06.local ceph-mon[81259]: from='client.54613 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T20:18:26.342 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:26 vm06.local ceph-mon[81259]: from='client.? 
192.168.123.101:0/835619183' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T20:18:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:27.017Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:27.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:27.019Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:27.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:27 vm01.local ceph-mon[97066]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 711 B/s rd, 0 op/s 2026-03-09T20:18:27.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:27 vm01.local ceph-mon[98877]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 711 B/s rd, 0 op/s 2026-03-09T20:18:27.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:27 vm06.local ceph-mon[81259]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 711 B/s rd, 0 op/s 2026-03-09T20:18:29.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:29 vm06.local ceph-mon[81259]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:18:29.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:29 vm01.local ceph-mon[97066]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:18:29.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:29 vm01.local ceph-mon[98877]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T20:18:31.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:31 vm06.local ceph-mon[81259]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T20:18:31.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:31 vm01.local ceph-mon[97066]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T20:18:31.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:31 vm01.local ceph-mon[98877]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T20:18:32.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:32 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:18:32] "GET /metrics HTTP/1.1" 200 38053 "" "Prometheus/2.51.0" 2026-03-09T20:18:33.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:33 vm06.local ceph-mon[81259]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 939 B/s rd, 0 op/s 
2026-03-09T20:18:33.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:33 vm01.local ceph-mon[97066]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 939 B/s rd, 0 op/s 2026-03-09T20:18:33.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:33 vm01.local ceph-mon[98877]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 939 B/s rd, 0 op/s 2026-03-09T20:18:35.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:35 vm06.local ceph-mon[81259]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:35.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:35 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:35.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[97066]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:35.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:35.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[98877]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:35.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:35.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:37.019Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:37.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:37.020Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:37.378 
INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:37 vm01.local ceph-mon[98877]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:37.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:37 vm01.local ceph-mon[97066]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:37.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:37 vm06.local ceph-mon[81259]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:39.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:39 vm06.local ceph-mon[81259]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:39.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:39 vm01.local ceph-mon[97066]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:39.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:39 vm01.local ceph-mon[98877]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:41.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:41 vm06.local ceph-mon[81259]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:41.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:41 vm01.local ceph-mon[97066]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:41.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:41 vm01.local ceph-mon[98877]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:42.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:42 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:18:42] "GET /metrics HTTP/1.1" 200 38047 "" "Prometheus/2.51.0" 2026-03-09T20:18:43.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:43 vm06.local ceph-mon[81259]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:43.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:43 vm01.local ceph-mon[97066]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:43.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:43 vm01.local ceph-mon[98877]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:45 vm06.local ceph-mon[81259]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:45.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:45 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:45.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:45 vm01.local ceph-mon[97066]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:45.628 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:45 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:45 vm01.local ceph-mon[98877]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:45.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:45 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:47.378 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:47 vm01.local ceph-mon[97066]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:47.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:47.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:47.378 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:47 vm01.local ceph-mon[98877]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:47.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:47 vm06.local ceph-mon[81259]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:49.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:49 vm06.local ceph-mon[81259]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:49.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:49 vm01.local ceph-mon[97066]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:49.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:49 vm01.local ceph-mon[98877]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:50.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:50 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:50 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:50.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:50 vm01.local ceph-mon[98877]: 
from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:18:51.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:51 vm06.local ceph-mon[81259]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:51.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:51 vm01.local ceph-mon[97066]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:51.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:51 vm01.local ceph-mon[98877]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:52.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:18:52 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:18:52] "GET /metrics HTTP/1.1" 200 38049 "" "Prometheus/2.51.0" 2026-03-09T20:18:53.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:53 vm06.local ceph-mon[81259]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:53.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:53 vm01.local ceph-mon[97066]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:53.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:53 vm01.local ceph-mon[98877]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:55 vm06.local ceph-mon[81259]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:55.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:55 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:55 vm01.local ceph-mon[97066]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:55.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:55 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:55 vm01.local ceph-mon[98877]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:18:55.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:55 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:18:57.299 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:57.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such 
host" 2026-03-09T20:18:57.299 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:18:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:18:57.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:18:57.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:57 vm06.local ceph-mon[81259]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:57.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:57 vm01.local ceph-mon[97066]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:57.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:57 vm01.local ceph-mon[98877]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:59.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:18:59 vm06.local ceph-mon[81259]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:59.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:18:59 vm01.local ceph-mon[97066]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:18:59.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:18:59 vm01.local ceph-mon[98877]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:01.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:01 vm06.local ceph-mon[81259]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:01.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:01 vm01.local ceph-mon[97066]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:01.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:01 vm01.local ceph-mon[98877]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:02.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:02 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:19:02] "GET /metrics HTTP/1.1" 200 38049 "" "Prometheus/2.51.0" 2026-03-09T20:19:03.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:03 vm06.local ceph-mon[81259]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:03.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:03 vm01.local ceph-mon[97066]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:03.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:03 vm01.local ceph-mon[98877]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:19:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:19:04.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:04 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:19:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:19:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:19:04.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:19:04.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T20:19:04.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T20:19:04.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:04 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' 2026-03-09T20:19:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:05 vm06.local ceph-mon[81259]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:05 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:05.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:05 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[97066]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[98877]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-09T20:19:05.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:05 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:07.330 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:07.021Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:07.330 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:07.022Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:07.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:07 vm06.local ceph-mon[81259]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:07.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:07 vm01.local ceph-mon[97066]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:07.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:07 vm01.local ceph-mon[98877]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:09.592 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:09 vm06.local ceph-mon[81259]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:09.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:09 vm01.local ceph-mon[97066]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:09.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:09 vm01.local ceph-mon[98877]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:11.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:11 vm01.local ceph-mon[97066]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:11.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:11 vm01.local ceph-mon[98877]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:11.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:11 vm06.local ceph-mon[81259]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:12.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:12 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:19:12] "GET /metrics HTTP/1.1" 200 38047 "" "Prometheus/2.51.0" 2026-03-09T20:19:13.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:13 
vm01.local ceph-mon[97066]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:13.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:13 vm01.local ceph-mon[98877]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:13.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:13 vm06.local ceph-mon[81259]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:15.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:15 vm01.local ceph-mon[97066]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:15.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:15 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:15.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:15 vm01.local ceph-mon[98877]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:15.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:15 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:15 vm06.local ceph-mon[81259]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:15.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:15 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:17.357 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:17.022Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:17.357 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:17.023Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:17.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:17 vm01.local ceph-mon[97066]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:17.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:17 vm01.local ceph-mon[98877]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:17.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:17 vm06.local ceph-mon[81259]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 
GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:19.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:19 vm01.local ceph-mon[97066]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:19.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:19 vm01.local ceph-mon[98877]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:19.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:19 vm06.local ceph-mon[81259]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:20.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:20 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:20.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:20 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:20.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:20 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T20:19:21.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:21 vm01.local ceph-mon[97066]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:21.628 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:21 vm01.local ceph-mon[98877]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:21.743 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:21 vm06.local ceph-mon[81259]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:22.878 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:22 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:19:22] "GET /metrics HTTP/1.1" 200 38044 "" "Prometheus/2.51.0" 2026-03-09T20:19:23.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:23 vm06.local ceph-mon[81259]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:23.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:23 vm01.local ceph-mon[97066]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:23.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:23 vm01.local ceph-mon[98877]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T20:19:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:25 vm06.local ceph-mon[81259]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:25.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:25 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:25 vm01.local ceph-mon[97066]: pgmap v232: 161 pgs: 161 
active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:25.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:25 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:25 vm01.local ceph-mon[98877]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T20:19:25.878 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:25 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T20:19:26.321 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.a vm01 *:9093,9094 running (19m) 2m ago 26m 26.3M - 0.25.0 c8568f914cd2 10eae0f056a0 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:grafana.a vm06 *:3000 running (7m) 2m ago 25m 76.8M - 10.4.0 c8b91775d855 d2acbfd0f792 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:iscsi.foo.vm01.zbymog vm01 running (7m) 2m ago 25m 52.7M - 3.5 e1d6a67b021e fb194521cf2f 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:mgr.x vm06 *:8443,9283,8765 running (7m) 2m ago 27m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 7af24f632a8b 2026-03-09T20:19:26.810 INFO:teuthology.orchestra.run.vm01.stdout:mgr.y vm01 *:8443,9283,8765 running (15m) 2m ago 28m 565M - 19.2.3-678-ge911bdeb 654f31e6858e 8f1a7cbf4da3 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:mon.a vm01 running (6m) 2m ago 28m 61.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 2eb02d7b7f8a 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:mon.b vm06 running (6m) 2m ago 27m 50.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 7b13932b4e7d 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:mon.c vm01 running (6m) 2m ago 27m 46.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c1e1918b4ae1 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.a vm01 *:9100 running (19m) 2m ago 26m 10.2M - 1.7.0 72c9c2088986 6e21ac8930a5 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.b vm06 *:9100 running (19m) 2m ago 26m 10.1M - 1.7.0 72c9c2088986 f65efb036188 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm01 running (5m) 2m ago 27m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 53027cc5a47f 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (5m) 2m ago 27m 56.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 9a6b315e4032 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (4m) 2m ago 27m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c062f84ee5c 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (4m) 2m ago 27m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b4e0b13e46c0 2026-03-09T20:19:26.811 
INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (3m) 2m ago 26m 52.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e ddc3d6821c0a 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm06 running (3m) 2m ago 26m 48.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f56302ede3ae 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (3m) 2m ago 26m 46.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 848df75f9503 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm06 running (2m) 2m ago 26m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 144cd471498b 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.a vm06 *:9095 running (7m) 2m ago 26m 52.9M - 2.51.0 1d3b7f56885b 865f54718b3f 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.zyuelz vm01 *:8000 running (2m) 2m ago 25m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 67b62e7f71bd 2026-03-09T20:19:26.811 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm06.lmiczb vm06 *:8000 running (2m) 2m ago 25m 95.2M - 19.2.3-678-ge911bdeb 654f31e6858e c750ea62cf38 2026-03-09T20:19:26.862 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T20:19:27.068 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:27.023Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:27.068 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:27.024Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "mon": { 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "mgr": { 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "osd": { 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "rgw": { 
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "overall": {
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-03-09T20:19:27.376 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:19:27.448 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status'
2026-03-09T20:19:27.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-mon[97066]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:27.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3918167983' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:27.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-mon[98877]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:27.629 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:27 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3918167983' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:27 vm06.local ceph-mon[81259]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:27.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:27 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/3918167983' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "in_progress": true,
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "services_complete": [
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "osd",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "rgw",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "mon",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "mgr"
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: ],
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "progress": "15/21 daemons upgraded",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed.",
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout: "is_paused": true
2026-03-09T20:19:27.932 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:19:28.002 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-09T20:19:28.542 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:28 vm01.local ceph-mon[98877]: from='client.54622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:28.542 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:28 vm01.local ceph-mon[97066]: from='client.54622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed.
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm01.zbymog on host vm01 failed.
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout: Upgrade daemon: iscsi.foo.vm01.zbymog: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm01.zbymog ...
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Creating ceph-iscsi config...
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/iscsi-gateway.cfg
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Write file: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/iscsi.foo.vm01.zbymog/tcmu-runner-entrypoint.sh
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2d0d36e446\x2d1bf1\x2d11f1\x2da427\x2d9918c6d73ee3.slice/ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl start ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:Traceback (most recent call last):
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:    return _run_code(code, main_globals, None,
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:    exec(code, run_globals)
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon
2026-03-09T20:19:28.542 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units
2026-03-09T20:19:28.543 INFO:teuthology.orchestra.run.vm01.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T20:19:28.543 INFO:teuthology.orchestra.run.vm01.stdout:RuntimeError: Failed command: systemctl start ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog: Job for ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service failed because the control process exited with error code.
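Note: everything the orchestrator reports about this failure is in the traceback above and the systemd pointer that follows; the upgrade itself is paused, not aborted. A minimal follow-up sketch, assuming shell access to the affected host (the fsid and daemon name below are the ones from this run and would differ elsewhere):

    fsid=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    daemon=iscsi.foo.vm01.zbymog
    # Inspect the failed unit, as the error message suggests:
    sudo systemctl status "ceph-${fsid}@${daemon}.service"
    sudo journalctl -xeu "ceph-${fsid}@${daemon}.service"
    # Or fetch the daemon's logs through cephadm:
    sudo cephadm logs --fsid "$fsid" --name "$daemon"
    # Once the cause is fixed, the paused upgrade can be resumed:
    ceph orch upgrade resume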
2026-03-09T20:19:28.543 INFO:teuthology.orchestra.run.vm01.stdout:See "systemctl status ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" and "journalctl -xeu ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@iscsi.foo.vm01.zbymog.service" for details.
2026-03-09T20:19:28.596 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"''
2026-03-09T20:19:28.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:28 vm06.local ceph-mon[81259]: from='client.54622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:29.197 INFO:teuthology.orchestra.run.vm01.stdout:true
2026-03-09T20:19:29.241 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1'
2026-03-09T20:19:29.463 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[97066]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:29.463 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[97066]: from='client.54634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:29.463 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3057297540' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:19:29.463 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/3879312359' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:29.464 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[98877]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:29.464 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[98877]: from='client.54634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:29.464 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3057297540' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:19:29.464 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:29 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/3879312359' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:29 vm06.local ceph-mon[81259]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:29 vm06.local ceph-mon[81259]: from='client.54634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:29 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/3057297540' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T20:19:29.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:29 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/3879312359' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:30.167 INFO:teuthology.orchestra.run.vm01.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)"
2026-03-09T20:19:30.209 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"''
2026-03-09T20:19:30.745 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:30 vm01.local ceph-mon[97066]: from='client.? 192.168.123.101:0/34197179' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:30.745 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:30 vm01.local ceph-mon[98877]: from='client.? 192.168.123.101:0/34197179' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:30.746 INFO:teuthology.orchestra.run.vm01.stdout:osd 8 2m ago -
2026-03-09T20:19:30.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:30 vm06.local ceph-mon[81259]: from='client.? 192.168.123.101:0/34197179' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T20:19:31.323 INFO:teuthology.run_tasks:Running task cephadm.shell...
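Note: the post-upgrade assertions above condense to three checks; a minimal sketch using the same commands as this run, assuming an admin shell (e.g. cephadm shell) with jq available:

    sha1=e911bdebe5c8faa3800735d1568fcdca65db60df
    # 1. All daemons report exactly one version string:
    ceph versions | jq -e '.overall | length == 1'
    # 2. That version is the expected build:
    ceph versions | jq -e '.overall | keys' | grep $sha1
    # 3. The osd service still counts all 8 daemons:
    ceph orch ls | grep '^osd '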
2026-03-09T20:19:31.328 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm01.local
2026-03-09T20:19:31.333 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- bash -c 'ceph orch upgrade ls'
2026-03-09T20:19:31.820 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:31 vm01.local ceph-mon[97066]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:31.820 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:31 vm01.local ceph-mon[98877]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:32.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:31 vm06.local ceph-mon[81259]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:32 vm06.local ceph-mon[81259]: from='client.44649 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:32 vm06.local ceph-mon[81259]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:32.842 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:32 vm06.local ceph-mon[81259]: from='client.44655 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[97066]: from='client.44649 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[97066]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:32.878 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[97066]: from='client.44655 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:32.879 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: ::ffff:192.168.123.106 - - [09/Mar/2026:20:19:32] "GET /metrics HTTP/1.1" 200 38044 "" "Prometheus/2.51.0"
2026-03-09T20:19:32.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[98877]: from='client.44649 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:32.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[98877]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:32.879 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:32 vm01.local ceph-mon[98877]: from='client.44655 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:33.366 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "image": "quay.io/ceph/ceph",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "registry": "quay.io",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "bare_image": "ceph/ceph",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "versions": [
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "20.2.0",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "20.1.1",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "20.1.0",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "19.2.3",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "19.2.2",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "19.2.1",
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: "19.2.0"
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout: ]
2026-03-09T20:19:33.367 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-09T20:19:33.422 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0'
2026-03-09T20:19:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:34 vm06.local ceph-mon[81259]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:34 vm06.local ceph-mon[81259]: from='client.44661 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:35.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:34 vm06.local ceph-mon[81259]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:19:35.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[98877]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:35.128 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[98877]: from='client.44661 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:35.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[98877]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:19:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[97066]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T20:19:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[97066]: from='client.44661 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:35.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:34 vm01.local ceph-mon[97066]: from='client.54580 -' entity='client.iscsi.foo.vm01.zbymog' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T20:19:35.388 INFO:teuthology.orchestra.run.vm01.stdout: "16.2.0",
2026-03-09T20:19:35.655 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2'
2026-03-09T20:19:35.924 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:35 vm01.local ceph-mon[98877]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:19:35.924 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:35 vm01.local ceph-mon[97066]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:19:36.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:35 vm06.local ceph-mon[81259]: from='mgr.44103 192.168.123.101:0/1405861241' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T20:19:37.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:36 vm06.local ceph-mon[81259]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:37.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:36 vm06.local ceph-mon[81259]: from='client.44667 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:37.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:36 vm01.local ceph-mon[97066]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:37.129 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:36 vm01.local ceph-mon[97066]: from='client.44667 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:37.129 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:37.024Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:19:37.129 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:37 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:37.025Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T20:19:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:36 vm01.local ceph-mon[98877]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 288 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T20:19:37.129 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:36 vm01.local ceph-mon[98877]: from='client.44667 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T20:19:37.556 INFO:teuthology.orchestra.run.vm01.stdout: "v16.2.2",
2026-03-09T20:19:37.557 INFO:teuthology.orchestra.run.vm01.stdout: "v16.2.2-20210505",
2026-03-09T20:19:37.608 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-09T20:19:37.610 INFO:tasks.cephadm:Teardown begin
2026-03-09T20:19:37.611 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-09T20:19:37.639 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-09T20:19:37.667 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-03-09T20:19:37.667 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 -- ceph mgr module disable cephadm
2026-03-09T20:19:38.212 INFO:teuthology.orchestra.run.vm01.stderr:Error: statfs /etc/ceph/ceph.conf: no such file or directory
2026-03-09T20:19:38.233 DEBUG:teuthology.orchestra.run:got remote process result: 125
2026-03-09T20:19:38.233 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-09T20:19:38.233 DEBUG:teuthology.orchestra.run.vm01:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-09T20:19:38.250 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-09T20:19:38.266 INFO:tasks.cephadm:Stopping all daemons...
2026-03-09T20:19:38.266 INFO:tasks.cephadm.mon.a:Stopping mon.a...
2026-03-09T20:19:38.266 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a
2026-03-09T20:19:38.583 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Bus STOPPING
2026-03-09T20:19:38.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:38 vm01.local systemd[1]: Stopping Ceph mon.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3...
2026-03-09T20:19:38.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[97062]: 2026-03-09T20:19:38.414+0000 7f585f332640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-09T20:19:38.583 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-a[97062]: 2026-03-09T20:19:38.415+0000 7f585f332640 -1 mon.a@0(leader) e4 *** Got Signal Terminated ***
2026-03-09T20:19:38.684 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.a.service'
2026-03-09T20:19:38.722 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-09T20:19:38.722 INFO:tasks.cephadm.mon.a:Stopped mon.a
2026-03-09T20:19:38.722 INFO:tasks.cephadm.mon.b:Stopping mon.c...
2026-03-09T20:19:38.723 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.c 2026-03-09T20:19:38.867 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:19:38.867 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Bus STOPPED 2026-03-09T20:19:38.867 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Bus STARTING 2026-03-09T20:19:38.867 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Serving on http://:::9283 2026-03-09T20:19:38.867 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Bus STARTED 2026-03-09T20:19:38.868 INFO:journalctl@ceph.mon.c.vm01.stdout:Mar 09 20:19:38 vm01.local systemd[1]: Stopping Ceph mon.c for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:19:39.051 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.c.service' 2026-03-09T20:19:39.083 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:39.083 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-09T20:19:39.083 INFO:tasks.cephadm.mon.b:Stopping mon.b... 2026-03-09T20:19:39.083 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.b 2026-03-09T20:19:39.128 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:38 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:38] ENGINE Bus STOPPING 2026-03-09T20:19:39.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:39 vm06.local systemd[1]: Stopping Ceph mon.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
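Note: the teardown above stops every daemon through its templated systemd unit, ceph-<fsid>@<daemon>; the earlier "statfs /etc/ceph/ceph.conf" failure (exit code 125) occurs because teardown removed that file before invoking cephadm shell to disable the mgr module. A sketch of the same unit-naming pattern, using this run's fsid:

    fsid=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    sudo systemctl stop "ceph-${fsid}@mon.a"       # one daemon
    sudo systemctl list-units "ceph-${fsid}@*"     # all units of this cluster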
2026-03-09T20:19:39.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:39 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b[81255]: 2026-03-09T20:19:39.280+0000 7f027e14e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:39.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 09 20:19:39 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mon-b[81255]: 2026-03-09T20:19:39.280+0000 7f027e14e640 -1 mon.b@2(peon) e4 *** Got Signal Terminated *** 2026-03-09T20:19:39.610 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mon.b.service' 2026-03-09T20:19:39.628 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:39] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T20:19:39.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:39] ENGINE Bus STOPPED 2026-03-09T20:19:39.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:39] ENGINE Bus STARTING 2026-03-09T20:19:39.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:39] ENGINE Serving on http://:::9283 2026-03-09T20:19:39.629 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: [09/Mar/2026:20:19:39] ENGINE Bus STARTED 2026-03-09T20:19:39.645 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:39.645 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-09T20:19:39.645 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 2026-03-09T20:19:39.645 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y 2026-03-09T20:19:39.953 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local systemd[1]: Stopping Ceph mgr.y for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:19:39.953 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:19:39.782+0000 7f137641a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mgr -n mgr.y -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:39.953 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y[83562]: 2026-03-09T20:19:39.782+0000 7f137641a640 -1 mgr handle_mgr_signal *** Got signal Terminated *** 2026-03-09T20:19:39.953 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local podman[132580]: 2026-03-09 20:19:39.838425225 +0000 UTC m=+0.071930505 container died 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0) 2026-03-09T20:19:39.953 INFO:journalctl@ceph.mgr.y.vm01.stdout:Mar 09 20:19:39 vm01.local podman[132580]: 2026-03-09 20:19:39.953432997 +0000 UTC m=+0.186938287 container remove 8f1a7cbf4da3092e47351e64a82179e2b8f2bf9eb6beee1710c9c5e77d84cd16 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-y, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:19:40.021 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.y.service' 2026-03-09T20:19:40.054 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:40.054 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-09T20:19:40.054 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-09T20:19:40.054 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x 2026-03-09T20:19:40.089 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:19:40 vm06.local systemd[1]: Stopping Ceph mgr.x for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:19:40.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:19:40 vm06.local podman[108768]: 2026-03-09 20:19:40.202969153 +0000 UTC m=+0.062998134 container died 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:19:40.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:19:40 vm06.local podman[108768]: 2026-03-09 20:19:40.289504199 +0000 UTC m=+0.149533180 container remove 7af24f632a8bec68de0b6a5169a17c06ced05f63d7a76ef65cb275913ba760e9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-09T20:19:40.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:19:40 vm06.local bash[108768]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-mgr-x 2026-03-09T20:19:40.342 INFO:journalctl@ceph.mgr.x.vm06.stdout:Mar 09 20:19:40 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-09T20:19:40.398 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@mgr.x.service' 2026-03-09T20:19:40.471 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:40.471 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-09T20:19:40.471 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-09T20:19:40.471 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.0 2026-03-09T20:19:40.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:40 vm01.local systemd[1]: Stopping Ceph osd.0 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
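Note: the osd.0 shutdown records below log "Immediate shutdown (osd_fast_shutdown=true)", i.e. the OSD exits at once rather than draining cleanly. osd_fast_shutdown is an ordinary config option; an illustrative sketch of inspecting or toggling it:

    ceph config get osd osd_fast_shutdown
    ceph config set osd osd_fast_shutdown false   # force the slower, orderly shutdown path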
2026-03-09T20:19:40.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:40 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:19:40.646+0000 7f855b186640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:40.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:40 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:19:40.646+0000 7f855b186640 -1 osd.0 143 *** Got signal Terminated *** 2026-03-09T20:19:40.878 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:40 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0[104408]: 2026-03-09T20:19:40.646+0000 7f855b186640 -1 osd.0 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132683]: 2026-03-09 20:19:45.675503968 +0000 UTC m=+5.110077015 container died 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132683]: 2026-03-09 20:19:45.718733491 +0000 UTC m=+5.153306538 container remove 53027cc5a47f8251440af90d495629dac4b6977f9e62cff997101284853f7a3a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local bash[132683]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132748]: 2026-03-09 20:19:45.86950891 +0000 UTC m=+0.017325870 container create b4b3613235827b2900bfb0069de0d9fd7d47f16c33bf584c19c29795f201b733 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, 
CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132748]: 2026-03-09 20:19:45.913405703 +0000 UTC m=+0.061222673 container init b4b3613235827b2900bfb0069de0d9fd7d47f16c33bf584c19c29795f201b733 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132748]: 2026-03-09 20:19:45.916686213 +0000 UTC m=+0.064503173 container start b4b3613235827b2900bfb0069de0d9fd7d47f16c33bf584c19c29795f201b733 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132748]: 2026-03-09 20:19:45.920304484 +0000 UTC m=+0.068121435 container attach b4b3613235827b2900bfb0069de0d9fd7d47f16c33bf584c19c29795f201b733 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, 
org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-09T20:19:45.962 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 09 20:19:45 vm01.local podman[132748]: 2026-03-09 20:19:45.863341937 +0000 UTC m=+0.011158897 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:19:46.079 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.0.service' 2026-03-09T20:19:46.116 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:46.116 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-09T20:19:46.116 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-09T20:19:46.116 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.1 2026-03-09T20:19:46.269 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:46 vm01.local systemd[1]: Stopping Ceph osd.1 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:19:46.628 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:46 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:19:46.268+0000 7feac6c30640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:46.628 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:46 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:19:46.268+0000 7feac6c30640 -1 osd.1 143 *** Got signal Terminated *** 2026-03-09T20:19:46.628 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:46 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1[108655]: 2026-03-09T20:19:46.268+0000 7feac6c30640 -1 osd.1 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:19:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:47.025Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:47.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:47 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:47.026Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132843]: 2026-03-09 20:19:51.2994986 +0000 UTC m=+5.045045615 container died 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, 
org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132843]: 2026-03-09 20:19:51.322756873 +0000 UTC m=+5.068303879 container remove 9a6b315e40321ef626ac130d791d23fc529cd645495fdb9276f9238c54343824 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local bash[132843]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132924]: 2026-03-09 20:19:51.480347108 +0000 UTC m=+0.017329617 container create 4dc0dbbab3e7ef7fa01c4b6fd0b2840dbb3bfd17a9f21e49123000f8a8b61e49 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132924]: 2026-03-09 20:19:51.527922796 +0000 UTC m=+0.064905326 container init 4dc0dbbab3e7ef7fa01c4b6fd0b2840dbb3bfd17a9f21e49123000f8a8b61e49 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132924]: 2026-03-09 20:19:51.530924223 +0000 UTC m=+0.067906743 container start 4dc0dbbab3e7ef7fa01c4b6fd0b2840dbb3bfd17a9f21e49123000f8a8b61e49 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132924]: 2026-03-09 20:19:51.532280121 +0000 UTC m=+0.069262641 container attach 4dc0dbbab3e7ef7fa01c4b6fd0b2840dbb3bfd17a9f21e49123000f8a8b61e49 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-1-deactivate, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:19:51.572 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 09 20:19:51 vm01.local podman[132924]: 2026-03-09 20:19:51.473431153 +0000 UTC m=+0.010413663 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:19:51.686 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.1.service' 2026-03-09T20:19:51.718 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:51.719 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-09T20:19:51.719 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-09T20:19:51.719 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2 2026-03-09T20:19:51.864 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:51 vm01.local systemd[1]: Stopping Ceph osd.2 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
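Note: after each OSD unit stops, the podman records above show a short-lived <name>-deactivate container, which appears to handle releasing the OSD's devices. A hedged post-teardown check that no containers from this cluster are left behind:

    fsid=0d36e446-1bf1-11f1-a427-9918c6d73ee3
    sudo podman ps -a --filter "name=ceph-${fsid}"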
2026-03-09T20:19:52.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:51 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:19:51.863+0000 7fbb59700640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:52.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:51 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:19:51.863+0000 7fbb59700640 -1 osd.2 143 *** Got signal Terminated *** 2026-03-09T20:19:52.128 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:51 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2[114120]: 2026-03-09T20:19:51.863+0000 7fbb59700640 -1 osd.2 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:19:57.154 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:56 vm01.local podman[133019]: 2026-03-09 20:19:56.888659805 +0000 UTC m=+5.037235988 container died 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True) 2026-03-09T20:19:57.154 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:56 vm01.local podman[133019]: 2026-03-09 20:19:56.975960987 +0000 UTC m=+5.124537170 container remove 3c062f84ee5cc99bca4a88c22c6a7189c1ae4bdf5f946461fcededcb79ad006c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:19:57.154 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:56 vm01.local bash[133019]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2 2026-03-09T20:19:57.154 INFO:journalctl@ceph.osd.2.vm01.stdout:Mar 09 20:19:57 vm01.local podman[133082]: 2026-03-09 20:19:57.131255524 +0000 UTC m=+0.018123342 container create ad4119c790b1fec9165434bb9e54864a02782175d9b9c1929922a84ab3b126fa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-2-deactivate, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 
9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:19:57.154 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:57.026Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:57.154 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:19:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:19:57.026Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:19:57.352 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.2.service' 2026-03-09T20:19:57.390 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:19:57.390 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-09T20:19:57.390 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-09T20:19:57.390 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.3 2026-03-09T20:19:57.878 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:19:57 vm01.local systemd[1]: Stopping Ceph osd.3 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:19:57.878 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:19:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:19:57.542+0000 7fdf67514640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:19:57.878 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:19:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:19:57.542+0000 7fdf67514640 -1 osd.3 143 *** Got signal Terminated *** 2026-03-09T20:19:57.878 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:19:57 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3[119620]: 2026-03-09T20:19:57.542+0000 7fdf67514640 -1 osd.3 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133178]: 2026-03-09 20:20:02.576345849 +0000 UTC m=+5.049217274 container died b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133178]: 2026-03-09 20:20:02.603220691 +0000 UTC m=+5.076092116 container remove b4e0b13e46c0d30a310e1708844f9ed693863f692dcce32de2dccf8283bbfc6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local bash[133178]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133245]: 2026-03-09 20:20:02.759568379 +0000 UTC m=+0.016037549 container create 9caf93781ff50c938db61f5dd96bf3939ef7772dd7070ad8876b9ed6f3184e04 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, 
org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133245]: 2026-03-09 20:20:02.80584722 +0000 UTC m=+0.062316389 container init 9caf93781ff50c938db61f5dd96bf3939ef7772dd7070ad8876b9ed6f3184e04 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133245]: 2026-03-09 20:20:02.80904762 +0000 UTC m=+0.065516799 container start 9caf93781ff50c938db61f5dd96bf3939ef7772dd7070ad8876b9ed6f3184e04 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133245]: 2026-03-09 20:20:02.810119837 +0000 UTC m=+0.066589016 container attach 9caf93781ff50c938db61f5dd96bf3939ef7772dd7070ad8876b9ed6f3184e04 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-3-deactivate, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
io.buildah.version=1.41.3) 2026-03-09T20:20:02.852 INFO:journalctl@ceph.osd.3.vm01.stdout:Mar 09 20:20:02 vm01.local podman[133245]: 2026-03-09 20:20:02.753526531 +0000 UTC m=+0.009995710 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T20:20:02.976 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.3.service' 2026-03-09T20:20:03.015 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:03.015 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-09T20:20:03.015 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-09T20:20:03.015 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.4 2026-03-09T20:20:03.342 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:03 vm06.local systemd[1]: Stopping Ceph osd.4 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:03.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:03 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:20:03.121+0000 7fa2085f2640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:20:03.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:03 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:20:03.121+0000 7fa2085f2640 -1 osd.4 143 *** Got signal Terminated *** 2026-03-09T20:20:03.343 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:03 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:20:03.121+0000 7fa2085f2640 -1 osd.4 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.405Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.406Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.406Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.406Z caller=refresh.go:90 level=error component="discovery manager 
notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.406Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:05.842 INFO:journalctl@ceph.prometheus.a.vm06.stdout:Mar 09 20:20:05 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-prometheus-a[77911]: ts=2026-03-09T20:20:05.406Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.101:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.101:8765: connect: connection refused" 2026-03-09T20:20:07.092 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:06 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:20:06.781+0000 7fa204c0b640 -1 osd.4 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:40.811553+0000 front 2026-03-09T20:19:40.811530+0000 (oldest deadline 2026-03-09T20:20:06.710889+0000) 2026-03-09T20:20:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:07.026Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:07.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:07 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:07.027Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:08.092 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:07 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4[89398]: 2026-03-09T20:20:07.749+0000 7fa204c0b640 -1 osd.4 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:40.811553+0000 front 2026-03-09T20:19:40.811530+0000 (oldest deadline 2026-03-09T20:20:06.710889+0000) 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:08 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:08.120+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:08 vm06.local podman[108872]: 2026-03-09 20:20:08.147832562 +0000 UTC m=+5.041494742 container died 
ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:08 vm06.local podman[108872]: 2026-03-09 20:20:08.166272927 +0000 UTC m=+5.059935117 container remove ddc3d6821c0afd814f2655a9f3f56fe336ccf36df173414588cf1f2d8e5a26c2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:08 vm06.local bash[108872]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:08 vm06.local podman[108950]: 2026-03-09 20:20:08.329214524 +0000 UTC m=+0.030413206 container create 75bf40e760a240303df9f1b07ab471e14fda1307a4ec01a51888b71a1b140969 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T20:20:08.372 INFO:journalctl@ceph.osd.4.vm06.stdout:Mar 09 20:20:08 vm06.local podman[108950]: 2026-03-09 20:20:08.368216292 +0000 UTC m=+0.069414974 container init 75bf40e760a240303df9f1b07ab471e14fda1307a4ec01a51888b71a1b140969 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-4-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-09T20:20:08.530 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.4.service' 2026-03-09T20:20:08.564 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:08.564 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-09T20:20:08.564 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-09T20:20:08.564 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5 2026-03-09T20:20:09.080 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:08 vm06.local systemd[1]: Stopping Ceph osd.5 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:09.080 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:08 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:08.714+0000 7f55bbe1d640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:20:09.080 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:08 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:08.714+0000 7f55bbe1d640 -1 osd.5 143 *** Got signal Terminated *** 2026-03-09T20:20:09.080 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:08 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:08.714+0000 7f55bbe1d640 -1 osd.5 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:20:09.342 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:09 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:09.078+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:10.342 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:09 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:09.966+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:10.342 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:10 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:10.037+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:11.342 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:10 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:10.949+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 
2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:11.342 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:11 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:11.015+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:12.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:11 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:11.868+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:12.342 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:11 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:11.936+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:12.342 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:12 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:12.055+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:13.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:12 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:12.873+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:13.342 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:12 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:12.907+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:13.342 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5[93549]: 2026-03-09T20:20:13.069+0000 7f55b8436640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:44.050905+0000 front 2026-03-09T20:19:44.050769+0000 (oldest deadline 2026-03-09T20:20:07.550301+0000) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:13 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:13.830+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:13 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:13.946+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 
2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:13 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:13.946+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local podman[109045]: 2026-03-09 20:20:13.738585353 +0000 UTC m=+5.038210051 container died f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local podman[109045]: 2026-03-09 20:20:13.769846956 +0000 UTC m=+5.069471654 container remove f56302ede3ae609120bd5a3caae4a640abc196cc3f456264e7a8bbbc957c14eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local bash[109045]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5 2026-03-09T20:20:14.004 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local podman[109110]: 2026-03-09 20:20:13.928655046 +0000 UTC m=+0.018377187 container create 886c2347244383393a45f01645b1875751090d607e00bae67f05ffd079850ec2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T20:20:14.005 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local podman[109110]: 2026-03-09 20:20:13.976928108 +0000 UTC m=+0.066650259 container init 886c2347244383393a45f01645b1875751090d607e00bae67f05ffd079850ec2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T20:20:14.005 INFO:journalctl@ceph.osd.5.vm06.stdout:Mar 09 20:20:13 vm06.local podman[109110]: 2026-03-09 20:20:13.980477913 +0000 UTC m=+0.070200043 container start 886c2347244383393a45f01645b1875751090d607e00bae67f05ffd079850ec2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-5-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T20:20:14.139 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.5.service' 2026-03-09T20:20:14.173 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:14.173 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-09T20:20:14.173 INFO:tasks.cephadm.osd.6:Stopping osd.6... 2026-03-09T20:20:14.173 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.6 2026-03-09T20:20:14.326 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local systemd[1]: Stopping Ceph osd.6 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
2026-03-09T20:20:14.592 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:14.325+0000 7f9415bb9640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:20:14.592 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:14.325+0000 7f9415bb9640 -1 osd.6 143 *** Got signal Terminated *** 2026-03-09T20:20:14.592 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:14.325+0000 7f9415bb9640 -1 osd.6 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:20:15.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:14.820+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:15.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:14.936+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:15.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:14 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:14.936+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:16.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:15.789+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:16.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:15.789+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:15.928+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:16.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:15 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:15.928+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 
2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:17.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:16.782+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:17.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:16.782+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:17.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:16.922+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:17.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:16 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:16.922+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:17.027Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:17.378 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:17 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:17.028Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:18.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:17 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:17.827+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:18.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:17 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:17.827+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:18.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:17 vm06.local 
ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:17.911+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:18.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:17 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:17.911+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:18.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:17 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:17.911+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:53.208813+0000 front 2026-03-09T20:19:53.208676+0000 (oldest deadline 2026-03-09T20:20:17.308597+0000) 2026-03-09T20:20:19.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:18.838+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:19.092 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:18.838+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:19.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:18.873+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.608667+0000 front 2026-03-09T20:19:45.608640+0000 (oldest deadline 2026-03-09T20:20:09.108145+0000) 2026-03-09T20:20:19.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:18.873+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:49.108654+0000 front 2026-03-09T20:19:49.108760+0000 (oldest deadline 2026-03-09T20:20:13.208360+0000) 2026-03-09T20:20:19.092 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:18 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6[97320]: 2026-03-09T20:20:18.873+0000 7f94121d2640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:53.208813+0000 front 2026-03-09T20:19:53.208676+0000 (oldest deadline 2026-03-09T20:20:17.308597+0000) 2026-03-09T20:20:19.638 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109203]: 2026-03-09 20:20:19.358343653 +0000 UTC m=+5.046807282 container died 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T20:20:19.638 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109203]: 2026-03-09 20:20:19.399697215 +0000 UTC m=+5.088160834 container remove 848df75f9503c4a0162a5e598f6553a217482cf195a830344d60376e6c911019 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:20:19.638 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local bash[109203]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6 2026-03-09T20:20:19.638 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109272]: 2026-03-09 20:20:19.548894306 +0000 UTC m=+0.020680649 container create df77dd346c5222a37aae81ef62657605761c9ea8616a5d19b88ed93edf797ac5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T20:20:19.639 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109272]: 2026-03-09 20:20:19.596817864 +0000 UTC m=+0.068604217 container init df77dd346c5222a37aae81ef62657605761c9ea8616a5d19b88ed93edf797ac5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, 
org.label-schema.license=GPLv2, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , ceph=True) 2026-03-09T20:20:19.639 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109272]: 2026-03-09 20:20:19.59983455 +0000 UTC m=+0.071620893 container start df77dd346c5222a37aae81ef62657605761c9ea8616a5d19b88ed93edf797ac5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0) 2026-03-09T20:20:19.639 INFO:journalctl@ceph.osd.6.vm06.stdout:Mar 09 20:20:19 vm06.local podman[109272]: 2026-03-09 20:20:19.604892828 +0000 UTC m=+0.076679161 container attach df77dd346c5222a37aae81ef62657605761c9ea8616a5d19b88ed93edf797ac5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-6-deactivate, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T20:20:19.779 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.6.service' 2026-03-09T20:20:19.816 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:19.816 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-09T20:20:19.816 INFO:tasks.cephadm.osd.7:Stopping osd.7... 
2026-03-09T20:20:19.816 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.7 2026-03-09T20:20:19.901 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:19.874+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:19.901 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:19.874+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:19.901 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local systemd[1]: Stopping Ceph osd.7 for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:20.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:19.960+0000 7f4c74544640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T20:20:20.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:19.960+0000 7f4c74544640 -1 osd.7 143 *** Got signal Terminated *** 2026-03-09T20:20:20.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:19 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:19.960+0000 7f4c74544640 -1 osd.7 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T20:20:21.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:20 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:20.918+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:21.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:20 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:20.918+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:22.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:21 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:21.869+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:22.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:21 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:21.869+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 
2026-03-09T20:20:22.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:21 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:21.869+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:55.635152+0000 front 2026-03-09T20:19:55.635181+0000 (oldest deadline 2026-03-09T20:20:21.534712+0000) 2026-03-09T20:20:23.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:22 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:22.901+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:23.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:22 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:22.901+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:23.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:22 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:22.901+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:55.635152+0000 front 2026-03-09T20:19:55.635181+0000 (oldest deadline 2026-03-09T20:20:21.534712+0000) 2026-03-09T20:20:24.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:23 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:23.855+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:24.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:23 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:23.855+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:24.342 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:23 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:23.855+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:55.635152+0000 front 2026-03-09T20:19:55.635181+0000 (oldest deadline 2026-03-09T20:20:21.534712+0000) 2026-03-09T20:20:25.146 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:24 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:24.871+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6806 osd.0 since back 2026-03-09T20:19:45.034420+0000 front 2026-03-09T20:19:45.034475+0000 (oldest deadline 2026-03-09T20:20:10.934121+0000) 2026-03-09T20:20:25.146 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:24 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:24.871+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6814 osd.1 since back 2026-03-09T20:19:50.934792+0000 front 2026-03-09T20:19:50.934696+0000 (oldest deadline 2026-03-09T20:20:15.634455+0000) 2026-03-09T20:20:25.146 
INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:24 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7[101086]: 2026-03-09T20:20:24.871+0000 7f4c70b5d640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.101:6822 osd.2 since back 2026-03-09T20:19:55.635152+0000 front 2026-03-09T20:19:55.635181+0000 (oldest deadline 2026-03-09T20:20:21.534712+0000) 2026-03-09T20:20:25.146 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:24 vm06.local podman[109367]: 2026-03-09 20:20:24.98985459 +0000 UTC m=+5.042533103 container died 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-09T20:20:25.146 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:25 vm06.local podman[109367]: 2026-03-09 20:20:25.015188378 +0000 UTC m=+5.067866900 container remove 144cd471498b1274f35d1fc96dbac65e0f5c3812214226f7bc4a1ea9ad009c5f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-09T20:20:25.146 INFO:journalctl@ceph.osd.7.vm06.stdout:Mar 09 20:20:25 vm06.local bash[109367]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-osd-7 2026-03-09T20:20:25.353 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@osd.7.service' 2026-03-09T20:20:25.387 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:25.388 INFO:tasks.cephadm.osd.7:Stopped osd.7 2026-03-09T20:20:25.388 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a... 
2026-03-09T20:20:25.388 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a 2026-03-09T20:20:25.615 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@prometheus.a.service' 2026-03-09T20:20:25.643 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T20:20:25.644 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a 2026-03-09T20:20:25.644 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 --force --keep-logs 2026-03-09T20:20:26.940 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:26 vm01.local systemd[1]: Stopping Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local podman[133650]: 2026-03-09 20:20:27.019189061 +0000 UTC m=+0.023906488 container died 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local podman[133650]: 2026-03-09 20:20:27.057469683 +0000 UTC m=+0.062187109 container remove 6e21ac8930a542ebf1540c7598780de18e2111a3e03664d148f32cba65bd9a44 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local bash[133650]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-a 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: Stopped Ceph node-exporter.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:20:27.216 INFO:journalctl@ceph.node-exporter.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.a.service: Consumed 1.807s CPU time. 
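Once the per-daemon stops have been issued, each host gets a cephadm rm-cluster call with --keep-logs, which tears down the remaining units and cluster state for the fsid but leaves /var/log/ceph in place so the logs can still be compressed and archived later in the teardown. The call as it appears in this log (the cephadm binary is the test-time copy under /home/ubuntu/cephtest):

```bash
# Remove the cluster's daemons and state on this host, but keep the logs
# so they can be gzipped and archived in a later step.
FSID=0d36e446-1bf1-11f1-a427-9918c6d73ee3
sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid "$FSID" --force --keep-logs
```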
2026-03-09T20:20:27.216 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:27.028Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:27.216 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:27.029Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: Stopping Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a[80952]: ts=2026-03-09T20:20:27.419Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..." 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local podman[133748]: 2026-03-09 20:20:27.423466684 +0000 UTC m=+0.025793017 container died 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local podman[133748]: 2026-03-09 20:20:27.441115618 +0000 UTC m=+0.043441951 container remove 10eae0f056a0ac235a3454e02bb0833a478bd6a54070e110f6c772e5a491aed4 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local podman[133748]: 2026-03-09 20:20:27.4423775 +0000 UTC m=+0.044703833 volume remove bcede7e8dc799f1d66ff6521154c2879291df650876c967668b9f1a3029ef399 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local bash[133748]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-alertmanager-a 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service: Deactivated successfully. 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: Stopped Ceph alertmanager.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:20:27.594 INFO:journalctl@ceph.alertmanager.a.vm01.stdout:Mar 09 20:20:27 vm01.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@alertmanager.a.service: Consumed 1.724s CPU time. 
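The alertmanager errors just before it stops are benign here: the ceph-dashboard webhook receiver points at host.containers.internal:8443, which no longer resolves on these VMs during teardown, so notification retries fail until the container is removed. If the same symptom needed confirming by hand, a quick check along these lines would do (these commands are only a suggested diagnostic, not something the job runs; the URL and port are taken from the log):

```bash
# Does the alertmanager webhook target resolve on this host?
getent hosts host.containers.internal || echo "no such host (matches the log)"

# Is anything still answering on the dashboard receiver endpoint?
curl -ksS --max-time 5 https://host.containers.internal:8443/api/prometheus_receiver || true
```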
2026-03-09T20:20:48.609 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 --force --keep-logs 2026-03-09T20:20:49.804 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local systemd[1]: Stopping Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 2026-03-09T20:20:49.804 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local podman[109899]: 2026-03-09 20:20:49.764732279 +0000 UTC m=+0.014708922 container died f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T20:20:49.804 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local podman[109899]: 2026-03-09 20:20:49.795323346 +0000 UTC m=+0.045299989 container remove f65efb03618892fdf4ac7622b683f743103e11546b4a3c59bde24b4f9a956c4c (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T20:20:49.804 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local bash[109899]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-node-exporter-b 2026-03-09T20:20:49.804 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T20:20:50.150 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T20:20:50.150 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local systemd[1]: Stopped Ceph node-exporter.b for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:20:50.150 INFO:journalctl@ceph.node-exporter.b.vm06.stdout:Mar 09 20:20:49 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@node-exporter.b.service: Consumed 1.863s CPU time. 2026-03-09T20:20:50.429 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local systemd[1]: Stopping Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3... 
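node-exporter.b and grafana.a on vm06 go down the same way as the daemons above; rm-cluster simply works through whatever templated units for this fsid are still present on the host. To see what is left at any point during teardown, listing those units is enough (a hypothetical check, not part of the run):

```bash
FSID=0d36e446-1bf1-11f1-a427-9918c6d73ee3
# Show every remaining systemd unit belonging to this cluster on the host.
systemctl list-units --all "ceph-${FSID}@*"
```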
2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=server t=2026-03-09T20:20:50.22939587Z level=info msg="Shutdown started" reason="System signal: terminated" 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=tracing t=2026-03-09T20:20:50.229848869Z level=info msg="Closing tracing" 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=grafana-apiserver t=2026-03-09T20:20:50.23011199Z level=info msg="StorageObjectCountTracker pruner is exiting" 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a[79620]: logger=ticker t=2026-03-09T20:20:50.230150934Z level=info msg=stopped last_tick=2026-03-09T20:20:50Z 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local podman[110036]: 2026-03-09 20:20:50.241311412 +0000 UTC m=+0.028770450 container died d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 (image=quay.io/ceph/grafana:10.4.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, maintainer=Grafana Labs ) 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local podman[110036]: 2026-03-09 20:20:50.274803951 +0000 UTC m=+0.062262989 container remove d2acbfd0f7924831703f7db53002b26329bef80d28e1c16c8d7fcabee53df8d0 (image=quay.io/ceph/grafana:10.4.0, name=ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a, maintainer=Grafana Labs ) 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local bash[110036]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3-grafana-a 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Deactivated successfully. 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local systemd[1]: Stopped Ceph grafana.a for 0d36e446-1bf1-11f1-a427-9918c6d73ee3. 2026-03-09T20:20:50.430 INFO:journalctl@ceph.grafana.a.vm06.stdout:Mar 09 20:20:50 vm06.local systemd[1]: ceph-0d36e446-1bf1-11f1-a427-9918c6d73ee3@grafana.a.service: Consumed 4.147s CPU time. 2026-03-09T20:21:01.163 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T20:21:01.190 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T20:21:01.216 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T20:21:01.216 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm01/crash 2026-03-09T20:21:01.216 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash -- . 
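Teuthology's "Transferring archived files" helper does not copy files one by one; it runs tar on the remote and streams the archive back, unpacking it under the job's archive directory. For the crash-dump step that looks roughly like the sketch below (the sudo tar invocation is taken from the log; the ssh and local untar framing is an assumption about what the helper does around it). As the next lines show, the crash directory does not exist on either host in this run, so tar exits non-zero and nothing is archived.

```bash
FSID=0d36e446-1bf1-11f1-a427-9918c6d73ee3
SRC=/var/lib/ceph/${FSID}/crash
DEST=/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm01/crash

mkdir -p "$DEST"
# Stream a tarball of the remote crash directory and unpack it locally.
# If the directory is missing (no crashes occurred), tar fails, as in this run.
ssh vm01.local "sudo tar c -f - -C $SRC -- ." | tar x -C "$DEST"
```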
2026-03-09T20:21:01.256 INFO:teuthology.orchestra.run.vm01.stderr:tar: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash: Cannot open: No such file or directory 2026-03-09T20:21:01.256 INFO:teuthology.orchestra.run.vm01.stderr:tar: Error is not recoverable: exiting now 2026-03-09T20:21:01.257 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm06/crash 2026-03-09T20:21:01.257 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash -- . 2026-03-09T20:21:01.281 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/crash: Cannot open: No such file or directory 2026-03-09T20:21:01.281 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now 2026-03-09T20:21:01.282 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-09T20:21:01.282 DEBUG:teuthology.orchestra.run.vm01:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1 2026-03-09T20:21:01.323 INFO:tasks.cephadm:Compressing logs... 2026-03-09T20:21:01.323 DEBUG:teuthology.orchestra.run.vm01:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T20:21:01.366 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T20:21:01.388 INFO:teuthology.orchestra.run.vm01.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T20:21:01.389 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T20:21:01.389 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.a.log 2026-03-09T20:21:01.389 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log 2026-03-09T20:21:01.391 INFO:teuthology.orchestra.run.vm06.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T20:21:01.391 INFO:teuthology.orchestra.run.vm06.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T20:21:01.392 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log 2026-03-09T20:21:01.395 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.a.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.y.log 2026-03-09T20:21:01.395 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.b.log 2026-03-09T20:21:01.396 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log: 92.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T20:21:01.396 
INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log 2026-03-09T20:21:01.400 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.y.log: 93.8% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log.gz 2026-03-09T20:21:01.402 INFO:teuthology.orchestra.run.vm06.stderr: 94.1% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log.gz 2026-03-09T20:21:01.403 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log 2026-03-09T20:21:01.403 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log 2026-03-09T20:21:01.404 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.b.log: /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log: 85.1% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log.gz 2026-03-09T20:21:01.404 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log 2026-03-09T20:21:01.404 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log 2026-03-09T20:21:01.407 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log: 91.1% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log.gz 2026-03-09T20:21:01.408 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.x.log 2026-03-09T20:21:01.408 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log 2026-03-09T20:21:01.409 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log: 88.6% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.log.gz 2026-03-09T20:21:01.410 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.4.log 2026-03-09T20:21:01.410 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log: 91.5% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.cephadm.log.gz 2026-03-09T20:21:01.414 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.c.log 2026-03-09T20:21:01.414 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.x.log: 91.0% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T20:21:01.416 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log: 94.4% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph.audit.log.gz 2026-03-09T20:21:01.416 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.5.log 2026-03-09T20:21:01.421 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.0.log 2026-03-09T20:21:01.424 
INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.6.log 2026-03-09T20:21:01.428 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.1.log 2026-03-09T20:21:01.431 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.0.log: 94.3% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-volume.log.gz 2026-03-09T20:21:01.431 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.7.log 2026-03-09T20:21:01.438 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.2.log 2026-03-09T20:21:01.439 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm06.lmiczb.log 2026-03-09T20:21:01.448 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.7.log: /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm06.lmiczb.log: 76.6% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm06.lmiczb.log.gz 2026-03-09T20:21:01.449 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.3.log 2026-03-09T20:21:01.461 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm01.zyuelz.log 2026-03-09T20:21:01.469 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/tcmu-runner.log 2026-03-09T20:21:01.472 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm01.zyuelz.log: 76.0% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-client.rgw.foo.vm01.zyuelz.log.gz 2026-03-09T20:21:01.476 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/tcmu-runner.log: 87.2% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/tcmu-runner.log.gz 2026-03-09T20:21:01.578 INFO:teuthology.orchestra.run.vm06.stderr: 90.4% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.x.log.gz 2026-03-09T20:21:02.169 INFO:teuthology.orchestra.run.vm06.stderr: 92.5% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.b.log.gz 2026-03-09T20:21:02.218 INFO:teuthology.orchestra.run.vm01.stderr: 89.9% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mgr.y.log.gz 2026-03-09T20:21:02.652 INFO:teuthology.orchestra.run.vm01.stderr: 92.4% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.c.log.gz 2026-03-09T20:21:03.125 INFO:teuthology.orchestra.run.vm06.stderr: 93.5% -- replaced with 
/var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.6.log.gz 2026-03-09T20:21:03.422 INFO:teuthology.orchestra.run.vm06.stderr: 93.7% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.5.log.gz 2026-03-09T20:21:03.462 INFO:teuthology.orchestra.run.vm06.stderr: 94.2% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.7.log.gz 2026-03-09T20:21:03.522 INFO:teuthology.orchestra.run.vm06.stderr: 93.9% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.4.log.gz 2026-03-09T20:21:03.524 INFO:teuthology.orchestra.run.vm06.stderr: 2026-03-09T20:21:03.524 INFO:teuthology.orchestra.run.vm06.stderr:real 0m2.144s 2026-03-09T20:21:03.525 INFO:teuthology.orchestra.run.vm06.stderr:user 0m3.985s 2026-03-09T20:21:03.525 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.222s 2026-03-09T20:21:03.536 INFO:teuthology.orchestra.run.vm01.stderr: 93.6% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.2.log.gz 2026-03-09T20:21:03.729 INFO:teuthology.orchestra.run.vm01.stderr: 91.2% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-mon.a.log.gz 2026-03-09T20:21:04.047 INFO:teuthology.orchestra.run.vm01.stderr: 93.8% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.0.log.gz 2026-03-09T20:21:04.102 INFO:teuthology.orchestra.run.vm01.stderr: 93.8% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.1.log.gz 2026-03-09T20:21:04.256 INFO:teuthology.orchestra.run.vm01.stderr: 93.8% -- replaced with /var/log/ceph/0d36e446-1bf1-11f1-a427-9918c6d73ee3/ceph-osd.3.log.gz 2026-03-09T20:21:04.258 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-09T20:21:04.258 INFO:teuthology.orchestra.run.vm01.stderr:real 0m2.880s 2026-03-09T20:21:04.258 INFO:teuthology.orchestra.run.vm01.stderr:user 0m4.706s 2026-03-09T20:21:04.259 INFO:teuthology.orchestra.run.vm01.stderr:sys 0m0.229s 2026-03-09T20:21:04.259 INFO:tasks.cephadm:Archiving logs... 2026-03-09T20:21:04.259 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm01/log 2026-03-09T20:21:04.259 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T20:21:04.589 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm06/log 2026-03-09T20:21:04.589 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T20:21:04.818 INFO:tasks.cephadm:Removing cluster... 2026-03-09T20:21:04.818 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 --force 2026-03-09T20:21:05.028 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 0d36e446-1bf1-11f1-a427-9918c6d73ee3 --force 2026-03-09T20:21:05.245 INFO:tasks.cephadm:Removing cephadm ... 2026-03-09T20:21:05.245 DEBUG:teuthology.orchestra.run.vm01:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T20:21:05.263 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T20:21:05.280 INFO:tasks.cephadm:Teardown complete 2026-03-09T20:21:05.280 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-09T20:21:05.300 INFO:teuthology.task.clock:Checking final clock skew... 
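The "Compressing logs" step that produced the interleaved gzip output above is one pipeline per host: find every *.log under /var/log/ceph and /var/log/rbd-target-api (the latter does not exist here, hence the harmless find error), and gzip each file in parallel via xargs. Reproduced from the log, with comments:

```bash
# Compress all Ceph log files in place before archiving them.
#   --max-procs=0   -> run as many gzip processes in parallel as possible
#   -print0 / -0    -> NUL-delimited, safe for any filename
#   --no-run-if-empty -> do nothing if find matched no files
time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
  | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
```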
2026-03-09T20:21:05.300 DEBUG:teuthology.orchestra.run.vm01:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T20:21:05.305 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T20:21:05.319 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:=============================================================================== 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:^* node-4.infogral.is 2 8 377 92 +171us[ +172us] +/- 14ms 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:^+ stratum2-2.NTP.TechFak.N> 2 7 377 27 +1205us[+1205us] +/- 18ms 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:^- pve2.h4x-gamers.top 2 8 377 219 +50us[ +51us] +/- 36ms 2026-03-09T20:21:05.323 INFO:teuthology.orchestra.run.vm01.stdout:^+ time.cloudflare.com 3 8 377 28 -1088us[-1088us] +/- 14ms 2026-03-09T20:21:05.337 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:=============================================================================== 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:^* node-4.infogral.is 2 8 377 30 +184us[ +179us] +/- 14ms 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:^+ stratum2-2.NTP.TechFak.N> 2 8 377 28 +1242us[+1242us] +/- 18ms 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:^- pve2.h4x-gamers.top 2 7 377 29 +393us[ +393us] +/- 40ms 2026-03-09T20:21:05.340 INFO:teuthology.orchestra.run.vm06.stdout:^+ time.cloudflare.com 3 8 377 222 -1092us[-1096us] +/- 14ms 2026-03-09T20:21:05.341 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-09T20:21:05.343 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-03-09T20:21:05.343 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-09T20:21:05.345 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-09T20:21:05.347 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-09T20:21:05.349 INFO:teuthology.task.internal:Duration was 2060.729795 seconds 2026-03-09T20:21:05.349 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-09T20:21:05.352 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-09T20:21:05.352 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T20:21:05.365 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T20:21:05.400 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T20:21:05.419 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T20:21:05.904 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
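The final clock check tries ntpq first and falls back to chronyc when ntpq is not installed, which is what happens on these CentOS 9 hosts (the "ntpq: command not found" lines are expected, and the chronyc sources output that follows is the actual result). The command as run on both hosts:

```bash
# Prefer ntpq, fall back to chronyc, and never fail the task on a missing tool.
PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
```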
2026-03-09T20:21:05.904 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local 2026-03-09T20:21:05.904 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T20:21:05.973 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local 2026-03-09T20:21:05.973 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T20:21:06.001 INFO:teuthology.task.internal.syslog:Gathering journactl... 2026-03-09T20:21:06.001 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T20:21:06.015 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T20:21:06.693 INFO:teuthology.task.internal.syslog:Compressing syslogs... 
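The syslog check greps each host's captured kern.log for BUG / INFO / DEADLOCK markers and then strips a long list of known-benign patterns; only if something survives the filters (so head -n 1 emits a line) is the job flagged. A condensed sketch of the same idea, keeping just a few of the exclusions shown above for brevity:

```bash
KERN=/home/ubuntu/cephtest/archive/syslog/kern.log
# Anything left after the ignore filters would be reported as kernel "badness".
grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' "$KERN" \
  | grep -v 'task .* blocked for more than .* seconds' \
  | grep -v 'lockdep is turned off' \
  | grep -v CRON \
  | grep -v ceph-crash \
  | head -n 1
```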
2026-03-09T20:21:06.693 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T20:21:06.694 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T20:21:06.716 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T20:21:06.716 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T20:21:06.716 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T20:21:06.716 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T20:21:06.717 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T20:21:06.722 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T20:21:06.722 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T20:21:06.723 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T20:21:06.723 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T20:21:06.723 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T20:21:06.867 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.6% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T20:21:06.918 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T20:21:06.919 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-03-09T20:21:06.922 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
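At setup time teuthology edits /etc/sudoers (typically to relax restrictions such as requiretty) after stashing the original as /etc/sudoers.orig.teuthology; the restore announced here is just moving that copy back, as the next commands on both hosts show:

```bash
# Put the host's original sudoers back in place (saved at job setup).
sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
```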
2026-03-09T20:21:06.922 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T20:21:06.983 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T20:21:07.008 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump 2026-03-09T20:21:07.010 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T20:21:07.025 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T20:21:07.046 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core 2026-03-09T20:21:07.072 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core 2026-03-09T20:21:07.087 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T20:21:07.122 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T20:21:07.122 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T20:21:07.142 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T20:21:07.143 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive 2026-03-09T20:21:07.145 INFO:teuthology.task.internal:Transferring archived files... 2026-03-09T20:21:07.145 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm01 2026-03-09T20:21:07.145 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T20:21:07.198 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/628/remote/vm06 2026-03-09T20:21:07.198 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T20:21:07.226 INFO:teuthology.task.internal:Removing archive directory... 2026-03-09T20:21:07.226 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T20:21:07.237 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T20:21:07.281 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload 2026-03-09T20:21:07.284 INFO:teuthology.task.internal:Not uploading archives. 2026-03-09T20:21:07.284 DEBUG:teuthology.run_tasks:Unwinding manager internal.base 2026-03-09T20:21:07.286 INFO:teuthology.task.internal:Tidying up after the test... 
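The coredump unwind resets kernel.core_pattern, deletes any cores that came from systemd-sysusers (filtered out as uninteresting), and removes the coredump directory if it is empty; the follow-up test -e returning exit status 1 on both hosts is the good case, meaning no cores were left behind to mark the job as failed. The same sequence from the log, broken out for readability:

```bash
COREDIR=/home/ubuntu/cephtest/archive/coredump

# Restore the default core pattern that the job overrode at setup.
sudo sysctl -w kernel.core_pattern=core

# Drop cores produced by systemd-sysusers; anything else would be kept for analysis.
sudo bash -c "for f in \$(find $COREDIR -type f); do file \$f | grep -q systemd-sysusers && rm \$f || true; done"

# Remove the directory only if it is now empty.
rmdir --ignore-fail-on-non-empty -- "$COREDIR"

# Exit status 1 here means no coredumps remain, so nothing gets archived.
test -e "$COREDIR"
```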
2026-03-09T20:21:07.286 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T20:21:07.295 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T20:21:07.314 INFO:teuthology.orchestra.run.vm01.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 20:21 /home/ubuntu/cephtest 2026-03-09T20:21:07.338 INFO:teuthology.orchestra.run.vm06.stdout: 8532139 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 20:21 /home/ubuntu/cephtest 2026-03-09T20:21:07.339 DEBUG:teuthology.run_tasks:Unwinding manager console_log 2026-03-09T20:21:07.344 INFO:teuthology.run:Summary data: description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} duration: 2060.7297954559326 owner: kyr success: true 2026-03-09T20:21:07.344 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-09T20:21:07.365 INFO:teuthology.run:pass
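The run ends with the summary block ("success: true", duration 2060.73 seconds) being pushed to the results server, followed by the final "pass" line. When triaging a whole batch of these archives, that last line is a convenient thing to grep for; the sketch below assumes the per-job log is stored as teuthology.log inside each numbered job directory, which is the usual layout but not shown explicitly in this excerpt:

```bash
# Quick triage: list the jobs in this run that reached the final "pass" line.
grep -l 'INFO:teuthology.run:pass' \
  /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/*/teuthology.log
```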